def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'calendar', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/calendar.readonly')
    try:
        page_token = None
        while True:
            calendar_list = service.calendarList().list(
                pageToken=page_token).execute()
            for calendar_list_entry in calendar_list['items']:
                print('{} {}'.format(calendar_list_entry['summary'],
                                     calendar_list_entry['id']))
            page_token = calendar_list.get('nextPageToken')
            if not page_token:
                break
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize.')
def main(startDate, endDate, line, argv):
    # print args.org_id
    # TODO: determine how to pass 'argv' and 'parents=[argparser]' as
    # function inputs.
    service, flags = sample_tools.init(
        argv, 'webmasters', 'v3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/webmasters.readonly')
    # Extract data for 2 of the 3 search types; video search is excluded.
    searchType = ['image', 'web']
    # Extract data for a single day; see
    # https://developers.google.com/webmaster-tools/v3/searchanalytics/query#auth
    results = []
    for search in searchType:
        request = {
            'startDate': startDate,
            'endDate': endDate,
            'dimensions': ['date', 'device', 'page'],
            'searchType': search,
            'rowLimit': 5000
        }
        # Call the execute_request helper.
        responses = execute_request(service, line, request)
        results.append(responses)
    return results
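# Several snippets here call an execute_request helper that is not shown.
# A minimal sketch, based on the official Search Console sample this code
# follows (assumes the same 'service' object built by sample_tools.init):
def execute_request(service, property_uri, request):
    # Run a Search Analytics query against the given site property.
    return service.searchanalytics().query(
        siteUrl=property_uri, body=request).execute()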
def upload(payload):
    # sys.argv stands in for the undefined 'argv' in the original snippet.
    service, flags = sample_tools.init(
        sys.argv, 'blogger', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/blogger')
    posts = service.posts()
    request = posts.insert(blogId="485055035283972076", body=payload,
                           isDraft=True, fetchImages=True)
    request.execute()
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangebuyer', 'v1.3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/adexchange.buyer')
    account_id = flags.account_id
    agency_id = flags.agency_id
    buyer_creative_id = flags.buyer_creative_id
    try:
        # Create a new creative to submit.
        creative_body = {
            'accountId': account_id,
            'buyerCreativeId': buyer_creative_id,
            'HTMLSnippet': ('<html><body><a href="http://www.google.com">'
                            'Hi there!</a></body></html>'),
            'clickThroughUrl': ['http://www.google.com'],
            'width': 300,
            'height': 250,
            'advertiserName': 'google'
        }
        if agency_id:
            creative_body['agencyId'] = agency_id
        creative = service.creatives().insert(body=creative_body).execute()
        # Print the response. If the creative has already been reviewed, its
        # status and categories will be included in the response.
        pprint.pprint(creative)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'dfareporting', 'v1.3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/dfareporting')
    profile_id = flags.profile_id
    try:
        # Create a new report resource to insert.
        report = {
            'name': 'Example Standard Report',
            'type': 'STANDARD',
            'criteria': {
                'dateRange': {'relativeDateRange': 'YESTERDAY'},
                'dimensions': [{'name': 'dfa:campaign'}],
                'metricNames': ['dfa:clicks']
            }
        }
        # Construct the request.
        request = service.reports().insert(profileId=profile_id, body=report)
        # Execute request and print response.
        pprint.pprint(request.execute())
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'analytics', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/analytics.readonly')
    # Try to make a request to the API. Print the results or handle errors.
    try:
        first_profile_id = get_first_profile_id(service)
        if not first_profile_id:
            print('Could not find a valid profile for this user.')
        else:
            results = get_top_keywords(service, first_profile_id)
            print_results(results)
    except TypeError as error:
        # Handle errors in constructing a query.
        print('There was an error in constructing your query : %s' % error)
    except HttpError as error:
        # Handle API errors.
        print('Arg, there was an API error : %s : %s' %
              (error.resp.status, error._get_reason()))
    except AccessTokenRefreshError:
        # Handle Auth errors.
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize')
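# Several of these Analytics snippets rely on a get_first_profile_id helper
# defined elsewhere. A minimal sketch, modeled on the official Core Reporting
# sample (traverses accounts -> web properties -> views and returns the first
# profile ID found, or None):
def get_first_profile_id(service):
    # Get a list of all Google Analytics accounts for this user.
    accounts = service.management().accounts().list().execute()
    if accounts.get('items'):
        account = accounts['items'][0]['id']
        # Get a list of all the properties for the first account.
        properties = service.management().webproperties().list(
            accountId=account).execute()
        if properties.get('items'):
            webproperty = properties['items'][0]['id']
            # Get a list of all views (profiles) for the first property.
            profiles = service.management().profiles().list(
                accountId=account, webPropertyId=webproperty).execute()
            if profiles.get('items'):
                return profiles['items'][0]['id']
    return None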
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'plus', 'v1', __doc__, __file__,
        scope='https://www.googleapis.com/auth/plus.me')
    try:
        person = service.people().get(userId='me').execute()
        print('Got your ID: %s' % person['displayName'])
        print()
        print('%-040s -> %s' % ('[Activity ID]', '[Content]'))
        # Don't execute the request until we reach the paging loop below.
        request = service.activities().list(
            userId=person['id'], collection='public')
        # Loop over every activity and print the ID and a short snippet of
        # content.
        while request is not None:
            activities_doc = request.execute()
            for item in activities_doc.get('items', []):
                print('%-040s -> %s' % (item['id'],
                                        item['object']['content'][:30]))
            request = service.activities().list_next(request, activities_doc)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize.')
def main(argv):
    service, flags = sample_tools.init(
        argv, 'blogger', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/blogger')
    users = service.users()
    blogs = service.blogs()
    posts = service.posts()
    # Use a raw string so the backslashes in the Windows path are not
    # treated as escape sequences.
    path_to_watch = (r"C:\Users\Sam "
                     r"Letcher\PycharmProjects\Enabling-Engineering-Blog\blog")
    before = dict([(f, None) for f in os.listdir(path_to_watch)])
    while True:
        time.sleep(5)
        after = dict([(f, None) for f in os.listdir(path_to_watch)])
        new = [f for f in after if f not in before]
        if new:
            try:
                file_list = parse_files(new)
                # Retrieve the list of blogs this user has write privileges on.
                this_users_blogs = blogs.listByUser(userId='self').execute()
                for blog in this_users_blogs['items']:
                    id = blog['id']
                    for key in file_list:
                        if file_list[key] != "":
                            # Set up the content format.
                            content = blogger_connector.content_setup(
                                id, key[:-4], file_list[key])
                            # Publish a draft post.
                            new_post = blogger_connector.create_draft_post(
                                posts, id, content)
                            print(new_post)
            except client.AccessTokenRefreshError:
                print('The credentials have been revoked or expired, please '
                      're-run the application to re-authorize')
        before = after
def main(argv):
    # Set up the optional and required arguments for posting to Blogger.
    # add_help is set to False because it would otherwise conflict with
    # sample_tools.init(), which also provides a --help argument. That is
    # also why the usage text is hardcoded below.
    parent = argparse.ArgumentParser(add_help=False,
                                     conflict_handler='resolve')
    group = parent.add_argument_group('standard')
    parent.add_argument("-t", "--title", help="Title for the blog post")
    parent.add_argument("-l", "--labels", help="Labels for the blog post")
    parent.add_argument("-b", "--blogs", action="store_true",
                        help="List all your blogs and get their ID")
    parent.add_argument("-s", "--src",
                        help="Sourcefile for the blog post content")
    args = parent.parse_args()
    if args.blogs:
        print("Look for blogIDs (Not yet implemented)")
        exit(1)
    if len(sys.argv) <= 1:
        print('Usage: $ python blogger.py \n\n\t'
              '--title "TITLE" (Title for the blog post)\n\t'
              '--labels "LABEL1, LABEL2" (Labels for the blog post)\n\t'
              '--src SOURCEFILE (Sourcefile for the blog post content)')
        print('\nYou can also use it to grab your blogIDs:\n\n'
              '$ python blogger.py --blogs (List all your blogs and get their ID)')
        exit(1)
    # Assign argument values to variables.
    flags = parent.parse_args(argv[1:])
    blogtitle = flags.title
    blogtags = flags.labels
    blogfilename = flags.src
    try:
        with open(blogfilename, 'rb') as f:
            blogcontent = f.read()
    except IOError:
        print("Sourcefile not found.")
        exit(1)
    # Authenticate and construct service; also include the argparse parents.
    service, flags = sample_tools.init(
        argv, 'blogger', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/blogger', parents=[parent])
    try:
        # Make the POST request. Note: 'myblogid' must be defined elsewhere
        # (e.g. a module-level constant holding the target blog's ID).
        posts = service.posts()
        request = posts.insert(blogId=myblogid, body={
            "kind": "blogger#post",
            "blog": {"id": myblogid},
            "title": blogtitle,
            "labels": blogtags,
            "content": blogcontent})
        response = request.execute()
        print('Posted: %s (%s)' % (response['title'], response['url']))
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize')
def main(argv):
    cgitb.enable()
    print('Content-type: text/html;\r\n')
    form = cgi.FieldStorage()
    service, flags = sample_tools.init(
        argv, 'webmasters', 'v3', __doc__, __file__, parents=[],
        scope='https://www.googleapis.com/auth/webmasters.readonly')
    print_row_array = []
    row2 = []
    request = __get_request(form)
    threads = []
    if isinstance(form['property_uri[]'], list):
        property_uri_list = form['property_uri[]']
        for uri in property_uri_list:
            if isinstance(request, list):
                for requestdata in request:
                    thread(threads, service, uri.value, requestdata, form,
                           print_row_array)
            else:
                thread(threads, service, uri.value, request, form,
                       print_row_array)
    else:
        if isinstance(request, list):
            for requestdata in request:
                thread(threads, service, form['property_uri[]'].value,
                       requestdata, form, print_row_array)
        else:
            thread(threads, service, form['property_uri[]'].value, request,
                   form, print_row_array)
    write_rows(print_row_array)
    print("done")
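# The 'thread' helper used above is not shown. A hypothetical minimal sketch
# (the real helper may differ; shared googleapiclient service objects are not
# strictly thread-safe, so a production version would give each worker its
# own http object):
import threading

def thread(threads, service, property_uri, request, form, print_row_array):
    # Each worker issues one Search Analytics query and appends its rows to
    # the shared result list.
    def worker():
        response = service.searchanalytics().query(
            siteUrl=property_uri, body=request).execute()
        print_row_array.extend(response.get('rows', []))
    t = threading.Thread(target=worker)
    threads.append(t)
    t.start()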
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'dfareporting', 'v1.3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/dfareporting')
    profile_id = flags.profile_id
    report_id = flags.report_id
    try:
        # Construct a get request for the specified report.
        request = service.reports().get(profileId=profile_id,
                                        reportId=report_id)
        # Execute the request.
        response = request.execute()
        # Create a report resource with the fields to update.
        report = {
            'accountId': response['accountId'],
            'id': response['id'],
            'lastModifiedTime': response['lastModifiedTime'],
            'name': 'Example Standard Report (Updated)',
            'ownerProfileId': response['ownerProfileId'],
            'type': response['type']
        }
        # Create the update request.
        request = service.reports().update(profileId=profile_id,
                                           reportId=report_id, body=report)
        # Execute request and print response.
        pprint.pprint(request.execute())
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangeseller', 'v1.1', __doc__, __file__, parents=[],
        scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
    try:
        # Retrieve the preferred deals list and display data as we receive it.
        request = service.preferreddeals().list()
        if request is not None:
            result = request.execute()
            if 'items' in result:
                deals = result['items']
                for deal in deals:
                    output = 'Deal id "%s" ' % deal['id']
                    if 'advertiserName' in deal:
                        output += 'for advertiser "%s" ' % deal['advertiserName']
                    if 'buyerNetworkName' in deal:
                        output += 'on network "%s" ' % deal['buyerNetworkName']
                    output += 'was found.'
                    print(output)
            else:
                print('No preferred deals found!')
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangeseller', 'v1.1', __doc__, __file__, parents=[],
        scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
    try:
        # Retrieve ad client list in pages and display data as we receive it.
        request = service.adclients().list(maxResults=MAX_PAGE_SIZE)
        while request is not None:
            result = request.execute()
            ad_clients = result['items']
            for ad_client in ad_clients:
                print('Ad client for product "%s" with ID "%s" was found. '
                      % (ad_client['productCode'], ad_client['id']))
                print('\tSupports reporting: %s'
                      % (ad_client['supportsReporting'] and 'Yes' or 'No'))
            request = service.adclients().list_next(request, result)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
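# MAX_PAGE_SIZE (and ROW_LIMIT, used in a later paging snippet) are
# module-level constants in the original samples. Typical values, matching
# the official Ad Exchange Seller examples, would be:
MAX_PAGE_SIZE = 50   # Maximum number of results per page request.
ROW_LIMIT = 5000     # Upper bound on total report rows to fetch.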
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'dfareporting', 'v1.3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/dfareporting')
    profile_id = flags.profile_id
    try:
        # Construct the request.
        request = service.reports().list(profileId=profile_id)
        while request is not None:
            # Execute request and print response.
            response = request.execute()
            pprint.pprint(response)
            nextPageToken = response.get('nextPageToken')
            if nextPageToken:
                request = service.reports().list_next(request, response)
            else:
                request = None
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'blogger', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/blogger')
    print(argv)
    url = "https://www.googleapis.com/blogger/v3/blogs/2695162313979547061/posts/"
    parameters = {'title': 'XYZ'}
    # Note: this raw REST call bypasses the authenticated 'service' object,
    # and its response is never used.
    r = requests.get(url, params=parameters)
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'dfareporting', 'v1.3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/dfareporting')
    profile_id = flags.profile_id
    report_id = flags.report_id
    try:
        # Create a report resource with the fields to patch.
        report = {
            'criteria': {
                'dateRange': {'relativeDateRange': 'YESTERDAY'}
            }
        }
        # Construct the request.
        request = service.reports().patch(profileId=profile_id,
                                          reportId=report_id, body=report)
        # Execute request and print response.
        pprint.pprint(request.execute())
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv): service, flags = sample_tools.init( argv, "webmasters", "v3", __doc__, __file__, parents=[argparser], scope="https://www.googleapis.com/auth/webmasters.readonly", ) # For a given landing page, which search terms bring traffic to that page (impressions, clicks and CTR) # and what position does the page appear? How does this change over time? # This is set up to request three dimensions (date, query/keyword, landing page) and # is filtered by keyword and landing page. The filter is 'contains'. request = { "startDate": flags.start_date, "endDate": flags.end_date, "dimensions": ["date", "query", "page"], "dimensionFilterGroups": [ { "filters": [ {"dimension": "query", "operator": "contains", "expression": keyword}, {"dimension": "page", "operator": "contains", "expression": url}, ] } ], "rowLimit": 5000, } response = execute_request(service, flags.property_uri, request) new_file(response)
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangeseller', 'v1.1', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
    ad_client_id = flags.ad_client_id
    try:
        # Retrieve URL channel list in pages and display data as we receive it.
        request = service.urlchannels().list(adClientId=ad_client_id,
                                             maxResults=MAX_PAGE_SIZE)
        while request is not None:
            result = request.execute()
            url_channels = result['items']
            for url_channel in url_channels:
                print('URL channel with URL pattern "%s" was found.'
                      % url_channel['urlPattern'])
            request = service.urlchannels().list_next(request, result)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangeseller', 'v1.1', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
    ad_client_id = flags.ad_client_id
    try:
        # Retrieve ad unit list in pages and display data as we receive it.
        request = service.adunits().list(adClientId=ad_client_id,
                                         maxResults=MAX_PAGE_SIZE)
        while request is not None:
            result = request.execute()
            ad_units = result['items']
            for ad_unit in ad_units:
                print('Ad unit with code "%s", name "%s" and status "%s" was '
                      'found. ' % (ad_unit['code'], ad_unit['name'],
                                   ad_unit['status']))
            request = service.adunits().list_next(request, result)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'audit', 'v1', __doc__, __file__,
        scope='https://www.googleapis.com/auth/apps/reporting/audit.readonly')
    try:
        activities = service.activities()
        # Retrieve the first two activities.
        print('Retrieving the first 2 activities...')
        activity_list = activities.list(
            applicationId='207535951991', customerId='C01rv1wm7',
            maxResults='2', actorEmail='*****@*****.**').execute()
        pprint.pprint(activity_list)
        # Now retrieve the next 2 events.
        match = re.search('(?<=continuationToken=).+$', activity_list['next'])
        if match is not None:
            next_token = match.group(0)
            print('\nRetrieving the next 2 activities...')
            activity_list = activities.list(
                applicationId='207535951991', customerId='C01rv1wm7',
                maxResults='2', actorEmail='*****@*****.**',
                continuationToken=next_token).execute()
            pprint.pprint(activity_list)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, "plus", "v1", __doc__, __file__,
        scope="https://www.googleapis.com/auth/plus.me"
    )
    try:
        person = service.people().get(userId="me").execute()
        print("Got your ID: %s" % person["displayName"])
        print()
        print("%-040s -> %s" % ("[Activity ID]", "[Content]"))
        # Don't execute the request until we reach the paging loop below.
        request = service.activities().list(userId=person["id"],
                                            collection="public")
        # Loop over every activity and print the ID and a short snippet of
        # content.
        while request is not None:
            activities_doc = request.execute()
            for item in activities_doc.get("items", []):
                print("%-040s -> %s" % (item["id"],
                                        item["object"]["content"][:30]))
            request = service.activities().list_next(request, activities_doc)
    except client.AccessTokenRefreshError:
        print("The credentials have been revoked or expired, please re-run "
              "the application to re-authorize.")
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'calendar', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/calendar')
    try:
        page_token = None
        i = 0
        calendars = []
        events = []
        while True:
            calendar_list = service.calendarList().list(
                pageToken=page_token).execute()
            calendars.extend(calendar_list['items'])
            print("Loading calendars")
            page_token = calendar_list.get('nextPageToken')
            if not page_token:
                break
        calendars = [c for c in calendars
                     if c['kind'] == 'calendar#calendarListEntry']
        for calendar in calendars:
            print('{0}\t{1}'.format(i, calendar['summary']))
            i += 1
        x = getUserInputIndex('\nSelect calendar by typing in the index', i)
        calendar_id = calendars[int(x)].get('id')
        print("Selected '{0}'".format(calendars[int(x)].get('summary')))
        i = 0
        while True:
            ev = service.events().list(calendarId=calendar_id,
                                       pageToken=page_token).execute()
            events.extend(ev['items'])
            page_token = ev.get('nextPageToken')
            print("Loading entries")
            if not page_token:
                break
        events = [e for e in events if e['kind'] == 'calendar#event']
        for event in events:
            print(u"{0}\t{1}\t{2}".format(
                i,
                (event.get('start', {}).get('date')
                 if event.get('start', {}).get('date') is not None
                 else event.get('start', {}).get('dateTime')),
                event.get('summary')))
            i += 1
        x = getUserInputIndex('\nSelect event by typing in the index', i)
        creation_date = events[int(x)].get('created')
        i = 0
        for event in events:
            if event.get('created') == creation_date:
                print(u"{0}\tDeleting {1}\t{2}".format(
                    i, event.get('summary'), event.get('id')))
                service.events().delete(calendarId=calendar_id,
                                        eventId=event.get('id')).execute()
                i += 1
        print('Deleted {0} entries'.format(i))
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize.')
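# getUserInputIndex is not shown in these snippets. A hypothetical helper
# that prompts until the user types a valid index in [0, count), returning it
# as a string (callers convert with int()):
def getUserInputIndex(prompt, count):
    while True:
        x = input('{0} (0-{1}): '.format(prompt, count - 1))
        if x.isdigit() and int(x) < count:
            return x
        print('Please enter a number between 0 and {0}.'.format(count - 1))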
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, "adexchangeseller", "v1.1", __doc__, __file__,
        parents=[argparser],
        scope="https://www.googleapis.com/auth/adexchange.seller.readonly",
    )
    # Process flags and read their values.
    ad_client_id = flags.ad_client_id
    saved_report_id = flags.report_id
    try:
        # Retrieve report.
        if saved_report_id:
            result = service.reports().saved().generate(
                savedReportId=saved_report_id).execute()
        elif ad_client_id:
            result = service.reports().generate(
                startDate="2011-01-01",
                endDate="2011-08-31",
                filter=["AD_CLIENT_ID==" + ad_client_id],
                metric=["PAGE_VIEWS", "AD_REQUESTS", "AD_REQUESTS_COVERAGE",
                        "CLICKS", "AD_REQUESTS_CTR", "COST_PER_CLICK",
                        "AD_REQUESTS_RPM", "EARNINGS"],
                dimension=["DATE"],
                sort=["+DATE"],
            ).execute()
        else:
            argparser.print_help()
            sys.exit(1)
        # Display headers.
        for header in result["headers"]:
            print("%25s" % header["name"], end=" ")
        print()
        # Display results.
        for row in result["rows"]:
            for column in row:
                print("%25s" % column, end=" ")
            print()
    except client.AccessTokenRefreshError:
        print("The credentials have been revoked or expired, please re-run the "
              "application to re-authorize")
def __init__(self):
    # sys.argv stands in for the undefined 'argv' in the original snippet.
    self.service, flags = sample_tools.init(
        sys.argv, 'calendar', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/calendar')
    scope = 'https://www.googleapis.com/auth/calendar'
    client_secrets = 'client_secrets.json'
    flow = client.flow_from_clientsecrets(client_secrets, scope=scope,
                                          message='ERROR')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'calendar', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/calendar.readonly')
    # print(get_calendar_list(service))
    # l = get_calendar_list(service)
    l = search_events(service, "track/projects", "StockScraper")
    for item in l:
        print(item)
def __init__(self, profile_id=None):
    self.service, self.flags = sample_tools.init(
        [], 'analytics', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/analytics.readonly')
    if profile_id is None:
        self.profile_id = self.get_first_profile_id()
    else:
        self.profile_id = profile_id
    self.start_date = datetime.now() - timedelta(days=30)
    self.end_date = datetime.now()
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'analytics', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/analytics.readonly')
    # Try to make a request to the API. Print the results or handle errors.
    try:
        first_profile_id = get_first_profile_id(service)
        if not first_profile_id:
            print('Could not find a valid profile for this user.')
        else:
            start_date = date(2016, 5, 1)
            end_date = date(2016, 5, 2)
            while end_date <= date.today():
                rs = check_output([
                    "s3cmd", "ls",
                    "s3://bibusuu/Web_Acquisition_Channel/%s" % start_date])
                if len(rs) > 1:
                    print("File Exists for %s, Skipping processing for this "
                          "file" % start_date)
                else:
                    page_index = 1
                    results = get_top_keywords(service, first_profile_id,
                                               start_date, end_date,
                                               page_index)
                    max_pages = results['totalResults']
                    while page_index <= max_pages:
                        print("Grabbing Acquisition data for %s to %s page %s"
                              % (start_date, end_date, page_index))
                        results = get_top_keywords(service, first_profile_id,
                                                   start_date, end_date,
                                                   page_index)
                        print_results(results, start_date, page_index)
                        page_index = page_index + 10000
                start_date = start_date + timedelta(days=1)
                end_date = end_date + timedelta(days=1)
                import_redshift()
    except TypeError as error:
        # Handle errors in constructing a query.
        print('There was an error in constructing your query : %s' % error)
    except HttpError as error:
        # Handle API errors.
        print('Arg, there was an API error : %s : %s' %
              (error.resp.status, error._get_reason()))
    except AccessTokenRefreshError:
        # Handle Auth errors.
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangebuyer', 'v1.3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/adexchange.buyer')
    try:
        # Retrieve the account list and display the data as received.
        result = service.accounts().list().execute()
        pprint.pprint(result)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'analytics', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/analytics.readonly')
    # Traverse the Management hierarchy and print results or handle errors.
    try:
        traverse_hiearchy(service)
    except TypeError as error:
        # Handle errors in constructing a query.
        print('There was an error in constructing your query : %s' % error)
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangeseller', 'v1.1', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
    ad_client_id = flags.ad_client_id
    try:
        # Retrieve report in pages and display data as we receive it.
        start_index = 0
        rows_to_obtain = MAX_PAGE_SIZE
        while True:
            result = service.reports().generate(
                startDate='2011-01-01', endDate='2011-08-31',
                filter=['AD_CLIENT_ID==' + ad_client_id],
                metric=['PAGE_VIEWS', 'AD_REQUESTS', 'AD_REQUESTS_COVERAGE',
                        'CLICKS', 'AD_REQUESTS_CTR', 'COST_PER_CLICK',
                        'AD_REQUESTS_RPM', 'EARNINGS'],
                dimension=['DATE'],
                sort=['+DATE'],
                startIndex=start_index,
                maxResults=rows_to_obtain).execute()
            # If this is the first page, display the headers.
            if start_index == 0:
                for header in result['headers']:
                    print('%25s' % header['name'], end=' ')
                print()
            # Display results for this page.
            for row in result['rows']:
                for column in row:
                    print('%25s' % column, end=' ')
                print()
            start_index += len(result['rows'])
            # Check to see if we're going to go above the limit and get as
            # many results as we can.
            if start_index + MAX_PAGE_SIZE > ROW_LIMIT:
                rows_to_obtain = ROW_LIMIT - start_index
                if rows_to_obtain <= 0:
                    break
            if start_index >= int(result['totalMatchedRows']):
                break
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangeseller', 'v1.1', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
    # Process flags and read their values.
    ad_client_id = flags.ad_client_id
    custom_channel_id = flags.custom_channel_id
    try:
        # Retrieve ad unit list in pages and display data as we receive it.
        request = service.customchannels().adunits().list(
            adClientId=ad_client_id,
            customChannelId=custom_channel_id,
            maxResults=MAX_PAGE_SIZE)
        while request is not None:
            result = request.execute()
            ad_units = result['items']
            for ad_unit in ad_units:
                print('Ad unit with code "{0!s}", name "{1!s}" and status '
                      '"{2!s}" was found. '.format(
                          ad_unit['code'], ad_unit['name'],
                          ad_unit['status']))
            request = service.customchannels().adunits().list_next(request,
                                                                   result)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'blogger', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/blogger')
    # Input your blog ID here.
    BLOG_ID = 00000000000000000
    try:
        posts = service.posts()
        request = posts.list(blogId=BLOG_ID)
        while request is not None:
            posts_doc = request.execute()
            if 'items' in posts_doc and not (posts_doc['items'] is None):
                for post in posts_doc['items']:
                    print(post['title'])
                    temp_body = post['content']
                    # Rewrite plain-HTTP Amazon URLs to HTTPS equivalents.
                    temp_body = temp_body.replace(
                        'http://ecx.images-amazon.com/',
                        'https://images-fe.ssl-images-amazon.com/')
                    temp_body = temp_body.replace(
                        'http://www.amazon.co.jp/',
                        'https://www.amazon.co.jp/')
                    post['content'] = temp_body
                    update = posts.update(blogId=BLOG_ID, postId=post['id'],
                                          body=post).execute()
            request = posts.list_next(request, posts_doc)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'blogger', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/blogger')
    try:
        users = service.users()
        # Retrieve this user's profile information.
        thisuser = users.get(userId='self').execute()
        print('This user\'s display name is: %s' % thisuser['displayName'])
        blogs = service.blogs()
        # Retrieve the list of blogs this user has write privileges on.
        thisusersblogs = blogs.listByUser(userId='self').execute()
        for blog in thisusersblogs['items']:
            print('The blog named \'%s\' is at: %s'
                  % (blog['name'], blog['url']))
        posts = service.posts()
        # List the posts for each blog this user has.
        for blog in thisusersblogs['items']:
            print('The posts for %s:' % blog['name'])
            request = posts.list(blogId=blog['id'])
            while request is not None:
                posts_doc = request.execute()
                if 'items' in posts_doc and not (posts_doc['items'] is None):
                    for post in posts_doc['items']:
                        print('  %s (%s)' % (post['title'], post['url']))
                request = posts.list_next(request, posts_doc)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangeseller', 'v1.1', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
    ad_client_id = flags.ad_client_id
    try:
        # Retrieve custom channel list in pages and display data as we
        # receive it.
        request = service.customchannels().list(adClientId=ad_client_id,
                                                maxResults=MAX_PAGE_SIZE)
        while request is not None:
            result = request.execute()
            custom_channels = result['items']
            for custom_channel in custom_channels:
                print('Custom channel with id "%s" and name "%s" was found. '
                      % (custom_channel['id'], custom_channel['name']))
                if 'targetingInfo' in custom_channel:
                    print(' Targeting info:')
                    targeting_info = custom_channel['targetingInfo']
                    if 'adsAppearOn' in targeting_info:
                        print('  Ads appear on: %s'
                              % targeting_info['adsAppearOn'])
                    if 'location' in targeting_info:
                        print('  Location: %s' % targeting_info['location'])
                    if 'description' in targeting_info:
                        print('  Description: %s'
                              % targeting_info['description'])
                    if 'siteLanguage' in targeting_info:
                        print('  Site language: %s'
                              % targeting_info['siteLanguage'])
            request = service.customchannels().list_next(request, result)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'adexchangeseller', 'v1.1', __doc__, __file__, parents=[],
        scope='https://www.googleapis.com/auth/adexchange.seller.readonly')
    try:
        # Retrieve the preferred deals list and display data as we receive it.
        request = service.preferreddeals().list()
        if request is not None:
            result = request.execute()
            if 'items' in result:
                deals = result['items']
                for deal in deals:
                    output = 'Deal id "%s" ' % deal['id']
                    if 'advertiserName' in deal:
                        output += 'for advertiser "%s" ' % deal['advertiserName']
                    if 'buyerNetworkName' in deal:
                        output += 'on network "%s" ' % deal['buyerNetworkName']
                    output += 'was found.'
                    print(output)
            else:
                print('No preferred deals found!')
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run the '
              'application to re-authorize')
def get_orders_by_campaigns(view_id, start_date, end_date, type="google"):
    # Authenticate and construct service.
    if type == "google":
        filters = 'ga:source==google,ga:medium==cpc'
    else:
        filters = 'ga:source==facebook'
    service, flags = sample_tools.init(
        [view_id], 'analytics', 'v3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/analytics.readonly,'
              'https://www.googleapis.com/auth/analytics')
    response = service.data().ga().get(
        ids=view_id,
        dimensions='ga:campaign,ga:transactionId,ga:medium,ga:source',
        metrics='ga:transactions',
        start_date=start_date,
        end_date=end_date,
        filters=filters).execute()
    # Collect the distinct campaign names, then group transaction IDs by
    # campaign.
    orders_dict = dict()
    campaigns_list = list()
    for i in response['rows']:
        campaigns_list.append(i[0])
    campaigns_list = list(set(campaigns_list))
    for m in campaigns_list:
        orders_dict[m] = []
    for i in response['rows']:
        orders_dict[i[0]].append(i[1])
    return orders_dict
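# A hypothetical usage of the helper above; the view ID and date range are
# placeholders, not values from the original code:
if __name__ == '__main__':
    orders = get_orders_by_campaigns(
        'ga:12345678', '2019-01-01', '2019-01-31', type='google')
    for campaign, transaction_ids in orders.items():
        print(campaign, len(transaction_ids))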
def main(argv):
    service, flags = sample_tools.init(
        argv, 'urlshortener', 'v1', __doc__, __file__,
        scope='https://www.googleapis.com/auth/urlshortener')
    try:
        url = service.url()
        longurl = input('Enter the URL to be shortened: ')
        body = {'longUrl': longurl}
        resp = url.insert(body=body).execute()
        print("")
        print("Shortened URL: " + resp['id'])
        print("Long URL: " + resp['longUrl'])
    except client.AccessTokenRefreshError:
        print("The credentials have been revoked or expired, please re-run "
              "the application to re-authorize")
def main(argv):
    logger.info('App START')
    logger.info('Start API service')
    try:
        service, flags = api_client.init(
            argv, 'webmasters', 'v3', __doc__, __file__,
            scope='https://www.googleapis.com/auth/webmasters',
            parents=[argparser])
        logger.info('Finish API service. SUCCESS')
    except Exception as e:
        logger.error('Finish API service. FAILED. ' + str(e))
        return False
    jsonResponse = getList(service, flags)
    if not jsonResponse:
        return False
    successJsonData = setMarkAsFixed(service, flags, jsonResponse)
    if not successJsonData:
        return False
    logResponse(successJsonData)
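# getList and setMarkAsFixed are defined elsewhere. A minimal sketch of
# getList, assuming it pulls crawl-error samples via the Webmasters v3
# urlcrawlerrorssamples collection and that a 'property_uri' flag exists
# (both are assumptions about the original code):
def getList(service, flags):
    # Fetch sample URLs with 'notFound' crawl errors for the given site.
    try:
        return service.urlcrawlerrorssamples().list(
            siteUrl=flags.property_uri, category='notFound',
            platform='web').execute()
    except Exception as e:
        logger.error('getList FAILED. ' + str(e))
        return None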
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'calendar', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/calendar.readonly')
    try:
        page_token = None
        while True:
            calendar_list = service.calendarList().list(
                pageToken=page_token).execute()
            for calendar_list_entry in calendar_list['items']:
                print(calendar_list_entry['summary'])
            page_token = calendar_list.get('nextPageToken')
            if not page_token:
                break
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize.')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'analytics', 'v3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/analytics.readonly')
    # Try to make a request to the API. Print the results or handle errors.
    try:
        results = get_api_query(service, flags.table_id).execute()
        print_results(results)
    except TypeError as error:
        # Handle errors in constructing a query.
        print('There was an error in constructing your query : %s' % error)
    except HttpError as error:
        # Handle API errors.
        print('Arg, there was an API error : %s : %s' %
              (error.resp.status, error._get_reason()))
    except AccessTokenRefreshError:
        # Handle Auth errors.
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize')
def main(argv):
    # Authenticate and construct service.
    service, flags = sample_tools.init(
        argv, 'calendar', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/calendar')
    try:
        page_token = None
        i = 0
        calendars = []
        events = []
        while True:
            calendar_list = service.calendarList().list(
                pageToken=page_token).execute()
            calendars.extend(calendar_list['items'])
            print("Loading calendars")
            page_token = calendar_list.get('nextPageToken')
            if not page_token:
                break
        calendars = [c for c in calendars
                     if c['kind'] == 'calendar#calendarListEntry']
        for calendar in calendars:
            print('{0}\t{1}'.format(i, calendar['summary']))
            i += 1
        x = getUserInputIndex('\nSelect calendar by typing in the index', i)
        calendar_id = calendars[int(x)].get('id')
        print("Selected '{0}'".format(calendars[int(x)].get('summary')))
        i = 0
        while True:
            ev = service.events().list(calendarId=calendar_id,
                                       pageToken=page_token).execute()
            events.extend(ev['items'])
            page_token = ev.get('nextPageToken')
            print("Loading entries")
            if not page_token:
                break
        events = [e for e in events if e['kind'] == 'calendar#event']
        for event in events:
            print(u"{0}\t{1}\t{2}".format(
                i,
                (event.get('start', {}).get('date')
                 if event.get('start', {}).get('date') is not None
                 else event.get('start', {}).get('dateTime')),
                event.get('summary')))
            i += 1
        x = getUserInputIndex('\nSelect event by typing in the index', i)
        creation_date = events[int(x)].get('created')
        i = 0
        for event in events:
            if event.get('created') == creation_date:
                print(u"{0}\tDeleting {1}\t{2}".format(
                    i, event.get('summary'), event.get('id')))
                service.events().delete(calendarId=calendar_id,
                                        eventId=event.get('id')).execute()
                i += 1
        print('Deleted {0} entries'.format(i))
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize.')
def getSearchConsole_Data(searchQueriesData, sites, startDate, endDate):
    global sample_tools, service, flags, queriesWithData
    # Import the Google API client library.
    from googleapiclient import sample_tools
    # Initialize the connector to the Google API.
    service, flags = sample_tools.init(
        [__file__], 'webmasters', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/webmasters.readonly')
    # Define the site properties for which data is requested.
    sites = sites
    country = sites[0][-2:]
    # Get the input search-query data.
    inputQueries = searchQueriesData[searchQueriesData.columns[0]]
    queriesHaveData = checkForData_byQueryTerm(inputQueries, sites,
                                               startDate=startDate,
                                               endDate=endDate)
    # Split the data by presence of Search Console information.
    tableNoData = queriesHaveData[queriesHaveData["hasData"] == 0]
    queriesWithData = queriesHaveData[queriesHaveData["hasData"] == 1]
    del queriesHaveData
    # Add columns for consistency with the already existing data.
    if not tableNoData.empty:
        tableNoData[["responseQuery", "queryId"]] = (
            tableNoData["queryTerm"].str.split("&<", expand=True))
        tableNoData.drop("queryTerm", axis=1, inplace=True)
        tableNoData = tableNoData[["responseQuery", "site", "date",
                                   "queryId", "hasData"]]
        tableNoData = tableNoData.join(searchQueriesData.set_index("queryId"),
                                       on="queryId")
        tableNoData.insert(0, "clicks", None)
        tableNoData.insert(1, "impressions", None)
        tableNoData.insert(2, "position", 20)
        tableNoData.insert(3, "landing", None)
        # Delete redundant columns.
        tableNoData.drop(["queryId", "Keyword"], axis=1, inplace=True)
    # The DataFrame is sent as a parameter to get the full data. It is based
    # on the previous check of the existence of data in Search Console for
    # that site, date, and search query.
    if not queriesWithData.empty:
        table_full = getSearchDataWhereExists(queriesWithData)
        table_full[["responseQuery", "queryId"]] = (
            table_full["queryTerm"].str.split("&<", expand=True))
        table_full.drop("queryTerm", axis=1, inplace=True)
        table_full = table_full.join(searchQueriesData.set_index("queryId"),
                                     on="queryId")
        table_full.drop(["queryId", "Keyword"], axis=1, inplace=True)
        colNames = list(tableNoData.columns)
        table_full = table_full.reindex(columns=colNames, copy=False)
        table_full = pd.concat([tableNoData, table_full])
    else:
        table_full = tableNoData
    table_full.sort_values(["date", "site", "category_id", "responseQuery"],
                           inplace=True)
    # Create an integer key to join with click-share data for the current
    # position.
    table_full["Key_PosWord"] = (
        (table_full["position"].round().astype(int)).astype(str) +
        table_full["wordCount"].apply(lambda x: 4 if x >= 4 else x).astype(str)
    ).astype(int)
    # Create an integer key to join with click-share data for the 1st position.
    table_full["maxPotentialKey_PosWord"] = (
        str(1) +
        table_full["wordCount"].apply(lambda x: 4 if x >= 4 else x).astype(str)
    ).astype(int)
    table_full["country"] = country
    # 'pathFiles' must be defined at module level as the directory holding
    # clickShares_byPosition.csv.
    clickShares_byPosition = pd.read_csv(pathFiles +
                                         "clickShares_byPosition.csv")
    table_full = table_full.join(
        clickShares_byPosition.set_index("Key_PosWord"), on="Key_PosWord")
    table_full = table_full.join(
        clickShares_byPosition.set_index("Key_PosWord"),
        on="maxPotentialKey_PosWord",
        lsuffix="_currentPos", rsuffix="_1stPos")
    return table_full
def main(argv):
    gcloud_key = 'C:\\Dropbox\\gsc\\google_credentials.json'  # replace with your own: path to the Google Cloud service-account JSON key
    gbq_project_id = 'your-project-id'  # replace with your own: BigQuery project ID
    gbq_dataset = 'search_console_google'  # replace with your own: BigQuery dataset name
    # Replace with your own: domain properties verified in Google Search
    # Console.
    domains = ['elama.ru', 'elama.global', 'elama.kz', 'elama.com.br']
    # The month range to export data for.
    first_month = current_month = datetime(2019, 8, 1)
    last_month = datetime(2019, 8, 1)
    search_types = ['web', 'image', 'video']
    dimensions = [['device'], ['country'], ['searchAppearance'], ['page'],
                  ['query'], ['query', 'device'], ['query', 'country'],
                  ['query', 'page']]
    row_limit = 25000  # current maximum response size is 25000 rows
    months = []
    service, flags = sample_tools.init(
        argv, 'webmasters', 'v3', __doc__, __file__, parents=[],
        scope='https://www.googleapis.com/auth/webmasters.readonly')
    credentials = service_account.Credentials.from_service_account_file(
        gcloud_key)
    while current_month <= last_month:
        start_date = current_month.strftime("%Y-%m-%d")
        end_date = (current_month + relativedelta(months=1) -
                    relativedelta(days=1)).strftime("%Y-%m-%d")
        months.append({'start_date': start_date, 'end_date': end_date})
        current_month += relativedelta(months=1)
    file = open(os.path.dirname(sys.argv[0]) + '/google_seo_log.txt', 'w')
    write_log(file, datetime.strftime(datetime.now(), '%Y-%m-%d %H:%M:%S\n'))
    for domain in domains:
        for month in months:
            df_domain = pandas.DataFrame()
            write_log(file, f'\n{domain:<18}')
            write_log(file, month['start_date'][0:7] + '\n')
            for search_type in search_types:
                df_search_type = pandas.DataFrame()
                for dimension in dimensions:
                    df_dimension = pandas.DataFrame()
                    dimension_str = str(dimension).replace('[', '').replace(
                        ']', '').replace('\'', '').replace(',', '')
                    start_row = 0
                    condition = True
                    while condition:
                        request = {
                            'startDate': month['start_date'],
                            'endDate': month['end_date'],
                            'dimensions': dimension,
                            'searchType': search_type,
                            'rowLimit': row_limit,
                            'startRow': start_row
                        }
                        response = service.searchanalytics().query(
                            siteUrl='sc-domain:' + domain,
                            body=request).execute()
                        try:
                            df = pandas.DataFrame.from_dict(response['rows'])
                            start_row = start_row + row_limit
                        except KeyError:
                            condition = False
                        else:
                            df['search_type'] = search_type
                            df['domain'] = domain
                            df['period'] = month['start_date'][0:7]
                            df['dimension1'] = dimension[0]
                            df['dimension2'] = (dimension[1]
                                                if len(dimension) == 2
                                                else None)
                            df['clicks'] = df['clicks'].astype(int)
                            df['impressions'] = df['impressions'].astype(int)
                            # URL-decode 'page' values; other dimension
                            # values are taken as-is.
                            if dimension[0] != 'page':
                                df['value1'] = df['keys'].apply(
                                    lambda row: row[0])
                            else:
                                df['value1'] = df['keys'].apply(
                                    lambda row: urllib.parse.unquote(row[0]))
                            if len(dimension) == 1:
                                df['value2'] = None
                            elif dimension[1] != 'page':
                                df['value2'] = df['keys'].apply(
                                    lambda row: row[1])
                            else:
                                df['value2'] = df['keys'].apply(
                                    lambda row: urllib.parse.unquote(row[1]))
                            df = df.drop(columns=['keys'])
                            df_dimension = pandas.concat(
                                [df_dimension, df], ignore_index=True)
                    if len(df_dimension) > 0:
                        clicks_over_0 = 0
                        for clicks in df_dimension['clicks']:
                            if clicks > 0:
                                clicks_over_0 += 1
                        write_log(file, f'  search_type: {search_type:<7}')
                        write_log(file, f'dimension1: {dimension[0]:<18}')
                        write_log(file, f'dimension2: {str(dimension[1] if len(dimension) == 2 else None):<9}')
                        write_log(file, f'all values: {str(len(df_dimension)):<8}')
                        write_log(file, f'values with clicks: {str(clicks_over_0):<7}')
                        write_log(file, f'clicks sum: {int(sum(df_dimension["clicks"])):<8}')
                        write_log(file, f'impressions sum: {int(sum(df_dimension["impressions"]))}\n')
                    else:
                        write_log(file, f'  search_type: {search_type:<7}')
                        write_log(file, f'dimension1: {dimension[0]:<18}')
                        write_log(file, f'dimension2: {str(dimension[1] if len(dimension) == 2 else None):<9}')
                        write_log(file, f'all values: {len(df_dimension)}\n')
                    df_search_type = pandas.concat(
                        [df_search_type, df_dimension], ignore_index=True)
                df_domain = pandas.concat([df_domain, df_search_type],
                                          ignore_index=True)
            if len(df_domain) > 0:
                table_name = (domain.replace('.', '_') + '_' +
                              month['start_date'].replace('-', ''))
                try:
                    pandas_gbq.to_gbq(df_domain,
                                      gbq_dataset + '.' + table_name,
                                      project_id=gbq_project_id,
                                      if_exists='replace')
                except Exception as e:
                    write_log(file, f'\n ERROR. Can\'t write table \'{gbq_project_id}:{gbq_dataset}.{table_name}\' in BigQuery: {e}\n\n')
                else:
                    write_log(file, f'\n Table \'{gbq_project_id}:{gbq_dataset}.{table_name}\' successfully created in BigQuery. Number of rows: {len(df_domain)}\n\n')
            else:
                write_log(file, f' Table in BigQuery not created, because rows: {len(df_domain)}\n\n')
    file.close()
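# write_log is used throughout the exporter above but not shown. A minimal
# sketch that mirrors how it is called (an open file handle plus a message
# fragment that already carries its own newlines):
def write_log(file, message):
    # Append the fragment to the log file and echo it to stdout.
    file.write(message)
    print(message, end='')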
argparser.add_argument('property_uri', type=str,
                       help=('Site or app URI to query data for (including '
                             'trailing slash).'))
argparser.add_argument('start_date', type=str,
                       help=('Start date of the requested date range in '
                             'YYYY-MM-DD format.'))
argparser.add_argument('end_date', type=str,
                       help=('End date of the requested date range in '
                             'YYYY-MM-DD format.'))
service, flags = sample_tools.init(
    sys.argv, 'webmasters', 'v3', __doc__, 'client_secrets.json',
    parents=[argparser],
    scope='https://www.googleapis.com/auth/webmasters.readonly')
# First run a query to learn which dates we have data for. You should always
# check which days in a date range have data before running your main query.
# This query shows data for the entire range, grouped and sorted by day,
# descending; any days without data will be missing from the results.
request = {
    'startDate': flags.start_date,
    'endDate': flags.end_date,
    'dimensions': ['date']
}
response = execute_request(service, flags.property_uri, request)
pprint(response)
def _initFusionService(self):
    from googleapiclient import sample_tools
    args = [None, '--noauth_local_webserver']
    service, flags = sample_tools.init(args, 'fusiontables', 'v2',
                                       __doc__, __file__)
    return service
def main(argv):
    service, flags = sample_tools.init(
        argv, 'webmasters', 'v3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/webmasters.readonly')
    # First run a query to learn which dates we have data for. You should
    # always check which days in a date range have data before running your
    # main query. This query shows data for the entire range, grouped and
    # sorted by day, descending; any days without data will be missing from
    # the results.
    # request = {
    #     'startDate': flags.start_date,
    #     'endDate': flags.end_date,
    #     'dimensions': ['date']
    # }
    # response = execute_request(service, flags.property_uri, request)
    # print_table(response, 'Available dates')

    # # Get totals for the date range.
    # request = {
    #     'startDate': flags.start_date,
    #     'endDate': flags.end_date
    # }
    # response = execute_request(service, flags.property_uri, request)
    # print_table(response, 'Totals')

    # # Get top queries for the date range, sorted by click count, descending.
    # request = {
    #     'startDate': flags.start_date,
    #     'endDate': flags.end_date,
    #     'dimensions': ['query'],
    #     'rowLimit': 1500
    # }
    # response = execute_request(service, flags.property_uri, request)
    # print_table(response, 'Top Queries')

    # # Get top 11-20 mobile queries for the date range, sorted by click
    # # count, descending.
    # request = {
    #     'startDate': flags.start_date,
    #     'endDate': flags.end_date,
    #     'dimensions': ['query'],
    #     'dimensionFilterGroups': [{
    #         'filters': [{
    #             'dimension': 'device',
    #             'expression': 'mobile'
    #         }]
    #     }],
    #     'rowLimit': 10,
    #     'startRow': 10
    # }
    # response = execute_request(service, flags.property_uri, request)
    # print_table(response, 'Top 11-20 Mobile Queries')

    # Get top 10 mobile pages for the date range, sorted by click count,
    # descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['page'],
        'dimensionFilterGroups': [{
            'filters': [{
                'dimension': 'device',
                'expression': 'mobile'
            }]
        }],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top Pages')
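# print_table is defined in the official Search Console sample these snippets
# are based on. A close sketch of that helper:
def print_table(response, title):
    # Print each row of the response, with a header naming the query.
    print('\n --' + title + ':')
    if 'rows' not in response:
        print('Empty response')
        return
    rows = response['rows']
    row_format = '{:<20}' + '{:>20}' * 4
    print(row_format.format('Keys', 'Clicks', 'Impressions', 'CTR',
                            'Position'))
    for row in rows:
        keys = ''
        # Keys are returned only if one or more dimensions were requested.
        if 'keys' in row:
            keys = u','.join(row['keys'])
        print(row_format.format(keys, row['clicks'], row['impressions'],
                                row['ctr'], row['position']))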
def auth(argv):
    service, flags = sample_tools.init(
        argv, 'blogger', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/blogger')
    # def blogs():
    try:
        username = '******'
        most_recent = db.most_recent_blog.find_one({'username': username})
        print(most_recent)
        users = service.users()  # googleapiclient.discovery.Resource object
        blogs = service.blogs()  # googleapiclient.discovery.Resource object
        # Retrieve the list of blogs this user has write privileges on
        # (userId='self' means the authenticated user).
        thisusersblogs = blogs.listByUser(userId='self').execute()
        posts = service.posts()  # Resource object for posts
        # List the posts for each blog this user has.
        for blog in thisusersblogs['items']:
            print('The posts for %s:' % blog['name'])
            if most_recent is not None:
                print("hey")
                request = posts.list(blogId=blog['id'],
                                     startDate=most_recent['date'])
            else:
                print("hi")
                request = posts.list(blogId=blog['id'])
            # Use the posts Resource object to get this blog's posts by ID.
            if request is not None:
                posts_doc = request.execute()
                query = {'username': username}
                update = {
                    'username': username,
                    'date': posts_doc['items'][0]['published']
                }
                db.most_recent_blog.update(query, update, upsert=True)
                content = db.blogs.find_one({'username': username})['content']
                if 'items' in posts_doc and not (posts_doc['items'] is None):
                    for post in posts_doc['items']:
                        if most_recent and post['published'] == most_recent['date']:
                            continue
                        # print(post['content'])
                        print('  %s (%s)' % (post['title'], post['url']))
                        content.append(post['content'])
                query = {'username': username}
                update = {'username': username, 'content': content}
                db.blogs.update(query, update, upsert=True)
                # request = posts.list_next(request, posts_doc)
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize')
def main(argv):
    # Change to the directory of the script, so that we have a consistent
    # location for the `calendar.dat` file created by `sample_tools.init`
    # below.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # Authenticate and construct service.
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument(
        '--org_clock_csv',
        help=('CSV file containing timestamp entries exported using the '
              'org-clock-csv package'))
    parser.add_argument(
        '--min_time',
        help=('ISO timestamp before which Org-clock entries or Calendar '
              'entries will not be considered'),
        default=(datetime.now().astimezone() -
                 timedelta(days=30)).isoformat())
    parser.add_argument(
        '--calendar_id',
        help='Google Calendar ID of calendar on which to manage events')
    service, flags = sample_tools.init(
        argv, 'calendar', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/calendar', parents=[parser])
    logging.getLogger().setLevel(getattr(logging, flags.logging_level))
    # Map (title, start_time, end_time) => row.
    org_clock_dict = dict()
    with open(flags.org_clock_csv) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            title = '{} ({})'.format(row['task'], row['parents'])
            start_time = datetime.strptime(row['start'],
                                           '%Y-%m-%d %H:%M').astimezone()
            end_time = datetime.strptime(row['end'],
                                         '%Y-%m-%d %H:%M').astimezone()
            event_key = (title, start_time, end_time)
            if start_time < datetime.fromisoformat(flags.min_time):
                logging.info(
                    "Skipping event {} - older than min_time {}".format(
                        event_key_to_isoformat(event_key), flags.min_time))
                continue
            org_clock_dict[(title, start_time, end_time)] = row
    try:
        events = service.events().list(
            calendarId=flags.calendar_id,
            timeMin=flags.min_time,
        ).execute()['items']
        events_to_delete = []
        events_present = []
        for event in events:
            event_key = (event['summary'],
                         datetime.fromisoformat(event['start']['dateTime']),
                         datetime.fromisoformat(event['end']['dateTime']))
            if event_key in org_clock_dict:
                logging.info("Skipping event {} - already found in {}".format(
                    event_key_to_isoformat(event_key), flags.org_clock_csv))
                del org_clock_dict[event_key]
            else:
                logging.info("Deleting event {} - not found in {}".format(
                    event_key_to_isoformat(event_key), flags.org_clock_csv))
                events_to_delete.append(event)
        for event in events_to_delete:
            service.events().delete(calendarId=flags.calendar_id,
                                    eventId=event['id']).execute()
        for (title, start_time, end_time) in org_clock_dict.keys():
            event = {
                'summary': title,
                'start': {
                    'dateTime': start_time.isoformat(),
                    'timeZone': 'America/Los_Angeles',
                },
                'end': {
                    'dateTime': end_time.isoformat(),
                    'timeZone': 'America/Los_Angeles',
                },
                'reminders': {
                    'useDefault': False,
                    'overrides': [
                        {'method': 'popup', 'minutes': 0},
                    ],
                },
            }
            event = service.events().insert(calendarId=flags.calendar_id,
                                            body=event).execute()
            logging.info("event posted: {}".format(
                event_key_to_isoformat((title, start_time, end_time))))
    except client.AccessTokenRefreshError:
        print('The credentials have been revoked or expired, please re-run '
              'the application to re-authorize.')
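# event_key_to_isoformat is referenced above but not shown. A plausible
# sketch that renders the (title, start, end) tuple for log messages:
def event_key_to_isoformat(event_key):
    title, start_time, end_time = event_key
    return '{} [{} - {}]'.format(
        title, start_time.isoformat(), end_time.isoformat())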
def main(argv):
    service, flags = sample_tools.init(
        argv, 'webmasters', 'v3', __doc__, __file__, parents=[argparser],
        scope='https://www.googleapis.com/auth/webmasters.readonly')
    # First run a query to learn which dates we have data for. You should
    # always check which days in a date range have data before running your
    # main query. This query shows data for the entire range, grouped and
    # sorted by day, descending; any days without data will be missing from
    # the results.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['date']
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Available dates')

    # Get totals for the date range.
    request = {'startDate': flags.start_date, 'endDate': flags.end_date}
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Totals')

    # Get top 10 queries for the date range, sorted by click count, descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['query'],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top Queries')
    with open("test.txt", "w", encoding="utf-8") as f:
        print("[", file=f)
        print(response, file=f)

    # Get top 11-20 mobile queries for the date range, sorted by click count,
    # descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['query'],
        'dimensionFilterGroups': [{
            'filters': [{
                'dimension': 'device',
                'expression': 'mobile'
            }]
        }],
        'rowLimit': 10,
        'startRow': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top 11-20 Mobile Queries')

    # Get top 10 pages for the date range, sorted by click count, descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['page'],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top Pages')
    with open("test.txt", "a", encoding="utf-8") as f:
        print(",", file=f)
        print(response, file=f)

    # Get the top 10 queries in India, sorted by click count, descending.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['query'],
        'dimensionFilterGroups': [{
            'filters': [{
                'dimension': 'country',
                'expression': 'ind'
            }]
        }],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Top queries in India')

    # Group by both country and device.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['country', 'device'],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Group by country and device')

    # Group by total Search Appearance count.
    # Note: it is not possible to use searchAppearance with other dimensions.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['searchAppearance'],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Search Appearance Features')

    # Group by country only.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['country'],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Group by country')
    with open("test.txt", "a", encoding="utf-8") as f:
        print(",", file=f)
        print(response, file=f)

    # Group by device only.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['device'],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'Group by device')
    with open("test.txt", "a", encoding="utf-8") as f:
        print(",", file=f)
        print(response, file=f)

    # Search Appearance once more, appended as the final chunk and closing
    # the bracketed output file.
    request = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['searchAppearance'],
        'rowLimit': 10
    }
    response = execute_request(service, flags.property_uri, request)
    print_table(response, 'searchAppearance')
    with open("test.txt", "a", encoding="utf-8") as f:
        print(",", file=f)
        print(response, file=f)
        print("]", file=f)
def main(argv):
  # Authenticate and construct service.
  service, flags = sample_tools.init(
      argv, 'calendar', 'v3', __doc__, __file__,
      scope='https://www.googleapis.com/auth/calendar.readonly')
  allEvents = []

  def suffix(d):
    # Ordinal suffix for a day of the month (1st, 2nd, 3rd, 4th, ...).
    return 'th' if 11 <= d <= 13 else {
        1: 'st', 2: 'nd', 3: 'rd'
    }.get(d % 10, 'th')

  def custom_strftime(format, t):
    # strftime with a '{S}' placeholder for the day plus its suffix.
    return t.strftime(format).replace('{S}', str(t.day) + suffix(t.day))

  try:
    page_token = None
    while True:
      today = datetime.today()
      monthAgo = today - relativedelta(months=1)
      tmax = today.isoformat('T') + "Z"
      tmin = monthAgo.isoformat('T') + "Z"
      events = service.events().list(
          calendarId='*****@*****.**',
          timeMin=tmin,
          timeMax=tmax,
          singleEvents=True,
          orderBy='startTime',
          pageToken=page_token).execute()
      for event in events['items']:
        if event['summary'] != 'Closed':
          eventName = event['summary'] + ', ' + event['description']
          date = event['start']['dateTime'][0:10]
          time = (event['start']['dateTime'][11:16] + '-' +
                  event['end']['dateTime'][11:16])
          objDate = datetime.strptime(date, '%Y-%m-%d')
          dateFormatted = custom_strftime('%A, %B {S}, %Y', objDate)
          allEvents.append({
              'date': dateFormatted,
              'eventInfo': [{
                  'eventName': eventName,
                  'url': 'http://www.lotusbloomfamily.org/allendale.html',
                  'time': time
              }]
          })
      page_token = events.get('nextPageToken')
      eventObject = {
          'Allendale_School': {
              'allEvents': allEvents,
              'location': {
                  "lat": 37.7885972,
                  "lng": -122.20478700000001
              }
          }
      }
      jsonEvents = json.dumps(eventObject)
      print(jsonEvents)
      if not page_token:
        break
      # The data needs to be returned in this shape and saved to the database:
      # {'Allendale_School': {'allEvents': [{'date': 'Thursday, June 21st, 2018',
      #     'eventInfo': [{"eventName": "Oakland Symphony Instrument Petting Zoo",
      #         "url": "http://oaklandlibrary.org/events/melrose-branch/oakland-symphony-instrument-petting-zoo-0",
      #         "time": "1:00pm"}]}],
      #     "location": {"lat": 37.7515679, "lng": -122.17491540000003}}}
  except client.AccessTokenRefreshError:
    print('The credentials have been revoked or expired, please re-run '
          'the application to re-authorize.')
def main(argv):
  service, flags = sample_tools.init(
      argv, 'webmasters', 'v3', __doc__, __file__, parents=[argparser],
      scope='https://www.googleapis.com/auth/webmasters.readonly')

  # First run a query to learn which dates we have data for. You should always
  # check which days in a date range have data before running your main query.
  # This query shows data for the entire range, grouped and sorted by day,
  # descending; any days without data will be missing from the results.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['date']
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Available dates')

  # Get totals for the date range.
  request = {'startDate': flags.start_date, 'endDate': flags.end_date}
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Totals')

  # Get top 10 queries for the date range, sorted by click count, descending.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['query', 'date'],
      'rowLimit': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Top Queries')
  with open("test.txt", "w", encoding="utf-8") as f:
    print("[", file=f)
    print(response, file=f)

  # Convert the response rows into a SQL INSERT statement and store it.
  count = 0
  if 'rows' in response:
    sql = 'insert into query (Date,SCKey,Click,Impressions,CTR,Position) values'
    for x in response['rows']:
      count += 1
      # keys[1] is the date, keys[0] is the query string.
      sql += ('(\'' + x['keys'][1] + '\',\'' + x['keys'][0] + '\',' +
              str(x['clicks']) + ',' + str(x['impressions']) + ',' +
              str(x['ctr']) + ',' + str(x['position']) + '),')
    sql = sql[:-1]
    sql += ';'
    print(sql)
    with open("qqq.db", "w", encoding="utf-8") as f:
      print(sql, file=f)
      sqlInsert(sql)

  # Get top 11-20 mobile queries for the date range, sorted by click count,
  # descending.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['query'],
      'dimensionFilterGroups': [{
          'filters': [{
              'dimension': 'device',
              'expression': 'mobile'
          }]
      }],
      'rowLimit': 10,
      'startRow': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Top 11-20 Mobile Queries')

  # Get top 10 pages for the date range, sorted by click count, descending.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['page', 'date'],
      'rowLimit': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Top Pages')

  # Convert the response rows into a SQL INSERT statement and store it.
  count = 0
  if 'rows' in response:
    sql = 'insert into page (Date,SCKey,Click,Impressions,CTR,Position) values'
    for x in response['rows']:
      count += 1
      sql += ('(\'' + x['keys'][1] + '\',\'' + x['keys'][0] + '\',' +
              str(x['clicks']) + ',' + str(x['impressions']) + ',' +
              str(x['ctr']) + ',' + str(x['position']) + '),')
    sql = sql[:-1]
    sql += ';'
    print(sql)
    with open("qqq.db", "a", encoding="utf-8") as f:
      print(sql, file=f)
      sqlInsert(sql)

  # Get the top 10 queries in India, sorted by click count, descending.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['query'],
      'dimensionFilterGroups': [{
          'filters': [{
              'dimension': 'country',
              'expression': 'ind'
          }]
      }],
      'rowLimit': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Top queries in India')

  # Group by both country and device.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['country', 'device'],
      'rowLimit': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Group by country and device')

  # Group by total number of Search Appearance count.
  # Note: It is not possible to use searchAppearance with other dimensions.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['searchAppearance'],
      'rowLimit': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Search Appearance Features')

  # Group by country and date.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['country', 'date'],
      'rowLimit': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Group by country and date')

  # Convert the response rows into a SQL INSERT statement and store it.
  count = 0
  if 'rows' in response:
    sql = 'insert into country (Date,SCKey,Click,Impressions,CTR,Position) values'
    for x in response['rows']:
      count += 1
      sql += ('(\'' + x['keys'][1] + '\',\'' + x['keys'][0] + '\',' +
              str(x['clicks']) + ',' + str(x['impressions']) + ',' +
              str(x['ctr']) + ',' + str(x['position']) + '),')
    sql = sql[:-1]
    sql += ';'
    print(sql)
    with open("qqq.db", "a", encoding="utf-8") as f:
      print(sql, file=f)
      sqlInsert(sql)

  # Group by device and date.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['device', 'date'],
      'rowLimit': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'Group by device and date')

  # Convert the response rows into a SQL INSERT statement and store it.
  count = 0
  if 'rows' in response:
    sql = 'insert into device (Date,SCKey,Click,Impressions,CTR,Position) values'
    for x in response['rows']:
      count += 1
      sql += ('(\'' + x['keys'][1] + '\',\'' + x['keys'][0] + '\',' +
              str(x['clicks']) + ',' + str(x['impressions']) + ',' +
              str(x['ctr']) + ',' + str(x['position']) + '),')
    sql = sql[:-1]
    sql += ';'
    print(sql)
    with open("qqq.db", "a", encoding="utf-8") as f:
      print(sql, file=f)
      sqlInsert(sql)

  # Group by search appearance; these rows carry a single key and no date.
  request = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['searchAppearance'],
      'rowLimit': 10
  }
  response = execute_request(service, flags.property_uri, request)
  print_table(response, 'searchAppearance')

  # Convert the response rows into a SQL INSERT statement and store it.
  count = 0
  if 'rows' in response:
    sql = 'insert into searchAppearance (SCKey,Click,Impressions,CTR,Position) values'
    for x in response['rows']:
      count += 1
      sql += ('(\'' + x['keys'][0] + '\',' +
              str(x['clicks']) + ',' + str(x['impressions']) + ',' +
              str(x['ctr']) + ',' + str(x['position']) + '),')
    sql = sql[:-1]
    sql += ';'
    print(sql)
    with open("qqq.db", "a", encoding="utf-8") as f:
      print(sql, file=f)
      sqlInsert(sql)
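# The string-built INSERTs above break on quotes inside query strings and
# invite SQL injection, and 'qqq.db' is written as a plain text file rather
# than an actual SQLite database. A hedged alternative sketch using sqlite3
# placeholders; table and column names mirror the statements above, and the
# table name must come from trusted code since it cannot be parameterized:
import sqlite3

def insert_rows(table, rows, db_path='qqq.db'):
  # Insert Search Analytics rows (keys = [key, date]) with bound parameters.
  conn = sqlite3.connect(db_path)
  with conn:
    conn.executemany(
        'INSERT INTO %s (Date, SCKey, Click, Impressions, CTR, Position) '
        'VALUES (?, ?, ?, ?, ?, ?)' % table,
        [(r['keys'][1], r['keys'][0], r['clicks'], r['impressions'],
          r['ctr'], r['position']) for r in rows])
  conn.close()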
def post(blogId, isDraft, body):
  blogger, flags = sample_tools.init(
      '', 'blogger', 'v3', __doc__, __file__,
      scope='https://www.googleapis.com/auth/blogger')
  res = blogger.posts().insert(blogId=blogId, isDraft=isDraft,
                               body=body).execute()
  return res
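# A hedged usage sketch for post(); the blog ID and body fields here are
# illustrative placeholders, not values taken from the samples.
payload = {
    'title': 'Hello from the Blogger API',
    'content': '<p>Posted via the Blogger v3 API.</p>',
}
created = post(blogId='1234567890123456789', isDraft=True, body=payload)
print(created['id'])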
def main(argv):
  # Authenticate and construct service.
  service, flags = sample_tools.init(
      argv, 'tasks', 'v1', __doc__, __file__,
      scope='https://www.googleapis.com/auth/tasks.readonly')

  try:
    current_page_token = None
    print('\nGOOGLE TASKLISTS:')
    while True:
      tasks_list = service.tasklists().list(
          pageToken=current_page_token).execute()
      for tasks_list_entry in tasks_list['items']:
        print(' - ' + tasks_list_entry['title'] + ': ' +
              tasks_list_entry['id'])
      current_page_token = tasks_list.get('nextPageToken')
      if not current_page_token:
        break
  except client.AccessTokenRefreshError:
    print('Please verify that the client_secrets.json is correctly setup\n'
          'Original error: The credentials have been revoked or expired, '
          'please re-run the application to re-authorize.')

  if TRELLO_API_KEY == '' or TRELLO_TOKEN == '':
    print('\n\nTRELLO_API_KEY or TRELLO_TOKEN is missing. Please add them '
          'in the KEYS/ID SECTION')
    sys.exit()

  # Use a distinct name so we do not shadow the oauth2client 'client' module
  # referenced in the except clause above.
  trello_client = trolly.client.Client(TRELLO_API_KEY, TRELLO_TOKEN)
  print('\nTRELLO Boards:')
  for board in trello_client.get_boards():
    print(' - %s' % board)

  if TRELLO_BOARD_ID == '':
    print('\n\nTRELLO_BOARD_ID missing. Please choose one above and add it '
          'in the KEYS/ID SECTION')
    sys.exit()

  trello_board = trolly.board.Board(trello_client, TRELLO_BOARD_ID)
  print('\nLists in Board ID:' + trello_board.id)
  for rb_list in trello_board.get_lists():
    print(' - %s' % rb_list)

  if TRELLO_LIST_ID == '':
    print('\n\nTRELLO_LIST_ID missing. Please choose one above and add it '
          'in the KEYS/ID SECTION')
    sys.exit()

  trello_list_to_update = trolly.list.List(trello_client, TRELLO_LIST_ID)
  print('\n')
  tasklist_to_import = service.tasks().list(
      tasklist=GOOGLE_CALENDAR_TASK_LIST_ID, showCompleted=False).execute()
  for task in tasklist_to_import.get('items', []):
    test_query_params = {}
    if task.get('title', ''):
      print('Importing: ' + task.get('title', '') +
            '\n\t notes: ' + task.get('notes', '') +
            '\n\t due: ' + task.get('due', ''))
      test_query_params['name'] = task.get('title', '')
      if task.get('due', ''):
        test_query_params['due'] = task.get('due', '')
      if task.get('notes', ''):
        test_query_params['desc'] = task.get('notes', '')
      trello_added_card = trello_list_to_update.add_card(
          query_params=test_query_params)
      # trello_added_card.add_comments(task.get('notes', ''))
  print('\n\nImport SUCCEEDED')
def main(argv):
  service, flags = sample_tools.init(
      argv, 'webmasters', 'v3', __doc__, __file__, parents=[argparser],
      scope='https://www.googleapis.com/auth/webmasters.readonly')

  # Prepare the XLSX file.
  title = 'pages_and_queries_'
  dates = str(flags.start_date) + '_' + str(flags.end_date)
  fileName = title + dates
  workbook = xlsxwriter.Workbook(fileName + '.xlsx',
                                 {'strings_to_urls': False})

  worksheet1 = workbook.add_worksheet('Pages and Queries')
  worksheet1.set_column('A:A', 35)
  worksheet1.set_column('B:B', 25)
  worksheet1.set_column('C:C', 8)
  worksheet1.set_column('D:D', 10)
  worksheet1.set_column('E:E', 8)
  worksheet1.set_column('F:F', 8)

  worksheet2 = workbook.add_worksheet('Totals, Date Range')
  for col in ('A:A', 'B:B', 'C:C', 'D:D', 'E:E', 'F:F'):
    worksheet2.set_column(col, 10)

  worksheet3 = workbook.add_worksheet('Totals, Each Date')
  for col in ('A:A', 'B:B', 'C:C', 'D:D', 'E:E', 'F:F'):
    worksheet3.set_column(col, 10)

  worksheet4 = workbook.add_worksheet('Totals, Each Page')
  worksheet4.set_column('A:A', 35)
  for col in ('B:B', 'C:C', 'D:D', 'E:E'):
    worksheet4.set_column(col, 10)

  # Get pages and queries, worksheet1.
  headings_ws1 = ['Page', 'Keywords', 'Clicks', 'Impressions', 'CTR',
                  'Position']
  worksheet1.write_row('A2', headings_ws1)
  worksheet1.autofilter(1, 0, 1000000, 5)

  def get_query_data(start_row):
    col_ws1 = 0
    # row_ws1 and page_query_list are assumed to be module-level globals
    # initialized elsewhere (e.g. row_ws1 = 2, page_query_list = []).
    global row_ws1
    global page_query_list
    request_queries = {
        'startDate': flags.start_date,
        'endDate': flags.end_date,
        'dimensions': ['page', 'query'],
        'rowLimit': 5000,
        'startRow': start_row
    }
    response_queries = execute_request(service, flags.property_uri,
                                       request_queries)
    # An empty page means we have paged past the last row: flush the
    # accumulated rows to the worksheet and finish.
    if response_queries == {'responseAggregationType': 'byPage'}:
      for item in page_query_list:
        worksheet1.write_row(row_ws1, col_ws1, item)
        row_ws1 += 1
      workbook.close()
      sys.exit()
    request_query_data = response_queries['rows']
    print(str(len(request_query_data)) + ' Pages and Queries:')
    print('page', '|', 'keywords', '|', 'clicks', '|', 'impressions', '|',
          'ctr', '|', 'position')
    for row in request_query_data:
      print(row['keys'][0], '|', row['keys'][1], '|', row['clicks'], '|',
            row['impressions'], '|', round(row['ctr'], 3), '|',
            round(row['position'], 3))
      page_query_list.append([
          row['keys'][0], row['keys'][1], row['clicks'], row['impressions'],
          round(row['ctr'], 3), round(row['position'], 3)
      ])
    # startRow is zero-based, so advance by exactly one page of 5000 rows.
    start_row += 5000
    time.sleep(1.5)
    get_query_data(start_row)

  # Get totals for the date range, worksheet2.
  request_all_dates = {
      'startDate': flags.start_date,
      'endDate': flags.end_date
  }
  response_all_dates = execute_request(service, flags.property_uri,
                                       request_all_dates)
  query_data = response_all_dates['rows']
  title_all_dates = 'Totals for Date Range'
  print(title_all_dates)
  print('start date', '|', 'end date', '|', 'clicks', '|', 'impressions',
        '|', 'ctr', '|', 'position')
  headings_ws2 = ['Start Date', 'End Date', 'Clicks', 'Impressions', 'CTR',
                  'Position']
  worksheet2.write_row('A2', headings_ws2)
  row_ws2 = 2
  col_ws2 = 0
  for row in query_data:
    print(flags.start_date, '|', flags.end_date, '|', row['clicks'], '|',
          row['impressions'], '|', round(row['ctr'], 3), '|',
          round(row['position'], 3))
    worksheet2.write_row(row_ws2, col_ws2, [
        flags.start_date, flags.end_date, row['clicks'], row['impressions'],
        round(row['ctr'], 3), round(row['position'], 3)
    ])
    row_ws2 += 1

  # Get totals for each date, worksheet3.
  request_each_date = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['date']
  }
  response_each_date = execute_request(service, flags.property_uri,
                                       request_each_date)
  query_data = response_each_date['rows']
  title_each_date = 'Totals for Each Date'
  print(title_each_date)
  print('date', '|', 'day', '|', 'clicks', '|', 'impressions', '|', 'ctr',
        '|', 'position')
  headings_ws3 = ['Date', 'Day', 'Clicks', 'Impressions', 'CTR', 'Position']
  worksheet3.write_row('A2', headings_ws3)
  worksheet3.autofilter(1, 0, 1000000, 5)
  row_ws3 = 2
  col_ws3 = 0
  for row in query_data:
    day_of_week = datetime.datetime.strptime(
        str(row['keys'][0]), "%Y-%m-%d").strftime('%A')
    print(row['keys'][0], '|', day_of_week, '|', row['clicks'], '|',
          row['impressions'], '|', round(row['ctr'], 3), '|',
          round(row['position'], 3))
    worksheet3.write_row(row_ws3, col_ws3, [
        row['keys'][0], day_of_week, row['clicks'], row['impressions'],
        round(row['ctr'], 3), round(row['position'], 3)
    ])
    row_ws3 += 1

  # Get totals for each page, worksheet4.
  request_each_page = {
      'startDate': flags.start_date,
      'endDate': flags.end_date,
      'dimensions': ['page']
  }
  response_each_page = execute_request(service, flags.property_uri,
                                       request_each_page)
  query_data = response_each_page['rows']
  title_each_page = 'Totals for Each Page'
  print(title_each_page)
  print('page', '|', 'clicks', '|', 'impressions', '|', 'ctr', '|',
        'position')
  headings_ws4 = ['Page', 'Clicks', 'Impressions', 'CTR', 'Position']
  worksheet4.write_row('A2', headings_ws4)
  worksheet4.autofilter(1, 0, 1000000, 4)
  row_ws4 = 2
  col_ws4 = 0
  for row in query_data:
    print(row['keys'][0], '|', row['clicks'], '|', row['impressions'], '|',
          round(row['ctr'], 3), '|', round(row['position'], 3))
    worksheet4.write_row(row_ws4, col_ws4, [
        row['keys'][0], row['clicks'], row['impressions'],
        round(row['ctr'], 3), round(row['position'], 3)
    ])
    row_ws4 += 1

  get_query_data(0)
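# get_query_data() above pages recursively, which can hit Python's recursion
# limit on very large sites. A hedged iterative sketch of the same startRow
# pagination, assuming the execute_request helper and a 5000-row page size:
import time

def fetch_all_rows(service, property_uri, base_request, page_size=5000):
  # Collect every row by advancing startRow one full page at a time.
  rows, start_row = [], 0
  while True:
    request = dict(base_request, rowLimit=page_size, startRow=start_row)
    response = execute_request(service, property_uri, request)
    batch = response.get('rows', [])
    rows.extend(batch)
    if len(batch) < page_size:
      return rows
    start_row += page_size
    time.sleep(1.5)  # stay under the per-property rate limit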
from markdown import markdown
from oauth2client import client
from googleapiclient import sample_tools
import os

os.environ['TZ'] = 'Asia/Taipei'
argv = ""

# Authenticate and build the service.
# The name of the API is "blogger" and the version is "v3".
# The description of the API is __doc__.
# The file name argument gives the location of client_secrets.json.
service, flags = sample_tools.init(
    argv, 'blogger', 'v3', __doc__, "./../../client_secrets.json",
    scope='https://www.googleapis.com/auth/blogger')


def get_cat_tag_content(data):
  # Note: since 'data' is the content of a .md file, its first line is '---'.
  # Split on newline characters.
  data_list = data.split("\n")
  # Line 2 is the title.
  title = data_list[1]
  # Line 4 is the category.
  category = data_list[3]
  # Line 5 is the tags.
  tags = data_list[4]
  # 'content' holds multiple items, so its type is a list.
def initialize_service(argv, id):
  # Build a read-only v3 service for the given API name.
  service, flags = sample_tools.init(
      argv, id, 'v3', __doc__, __file__, parents=[argparser],
      scope='https://www.googleapis.com/auth/' + id + '.readonly')
  return service
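# A hedged usage sketch for initialize_service(); 'webmasters' is one API
# whose v3 read-only scope matches the URL pattern built above.
import sys

service = initialize_service(sys.argv, 'webmasters')
sites = service.sites().list().execute()
print(sites.get('siteEntry', []))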
def main(argv):
  # Use the discovery doc to build a service that we can use to make
  # MyBusiness API calls, and authenticate the user so we can access their
  # account.
  service, flags = sample_tools.init(
      argv, "mybusiness", "v4", __doc__, __file__,
      scope="https://www.googleapis.com/auth/business.manage",
      discovery_filename=discovery_doc)

  ## STEP 1 - Get the list of all accounts for the authenticated user.
  # GMB API call -- Method: accounts.list
  output = service.accounts().list().execute()
  print("List of GMB Accounts: \n\n")
  print(json.dumps(output, indent=2) + "\n")
  # Extract the account name, which will be used for further API calls.
  gmbAccount = output["accounts"][1]["name"]

  ## STEP 2 - Get the list of all available locations for the specified
  ## account (gmbAccount).
  # Limitation: 100 locations are fetched per API call, so we use
  # 'pageToken' to fetch all the available locations.
  try:
    page_token = None
    # Define an empty dataframe and column names for the extracted info.
    loc_df = pd.DataFrame()
    column_names = [
        'locationId', 'StoreID', 'Street', 'PostalCode', 'City',
        'CountryCode', 'PrimaryPhone'
    ]
    while True:
      print("Fetching list of locations for account " + gmbAccount, "\n\n")
      # GMB API call -- Method: accounts.locations.list
      loclist = service.accounts().locations().list(
          parent=gmbAccount, pageToken=page_token).execute()
      print(json.dumps(loclist, indent=2))
      # Extract only the necessary information from the response and append
      # it to the dataframe.
      for i in loclist['locations']:
        name = i['name']
        storeCode = i['storeCode']
        address = i['address']['addressLines'][0]
        postalCode = i['address']['postalCode']
        locality = i['address']['locality']
        regionCode = i['address']['regionCode']
        primaryPhone = i['primaryPhone']
        loc_df = loc_df.append(pd.Series([
            name, storeCode, address, postalCode, locality, regionCode,
            primaryPhone
        ]), ignore_index=True)
      # Check for a 'nextPageToken' in the response; if there is none,
      # break the loop.
      page_token = loclist.get('nextPageToken')
      if not page_token:
        break
  finally:
    print("All locations fetched for the account")
    print("Next Page Token: " + str(page_token))

  loc_df.columns = column_names
  # loc_df.to_csv('temp/allinone.csv', index=False)

  ## STEP 3 - Get the review data for each locationId.
  # Define an empty dataframe and column names for the extracted review data.
  review_df = pd.DataFrame()
  columns1 = [
      'locationId', 'ReviewerName', 'StarRating', 'ReviewCreateTime',
      'ReviewerComments'
  ]
  # Loop over each locationId and generate the request body for the API call.
  for x in loc_df['locationId']:
    body = {"locationNames": [x]}
    print("Getting reviews for locationId " + x)
    # GMB API call -- Method: accounts.locations.batchGetReviews
    revlist = service.accounts().locations().batchGetReviews(
        name=gmbAccount, body=body).execute()
    # Extract the necessary information from the response message.
    for j in revlist['locationReviews']:
      locationId = j['name']
      ReviewerName = j['review']['reviewer']['displayName']
      ReviewerRating = j['review']['starRating']
      ReviewUpdateTime = j['review']['createTime']
      ReviewerComments = j['review'].get('comment', 'NONE')
      # Append the extracted values to the dataframe.
      review_df = review_df.append(pd.Series([
          locationId, ReviewerName, ReviewerRating, ReviewUpdateTime,
          ReviewerComments
      ]), ignore_index=True)

  # Join the location data and review data into a single dataframe.
  review_df.columns = columns1
  combined = pd.merge(loc_df, review_df, on='locationId')
  combined.to_csv('temp/allmerged.csv', index=False)
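# DataFrame.append() as used above was deprecated and removed in pandas 2.0;
# a sketch of the same accumulation with a list of dicts, assuming the
# accounts.locations.list response shape shown above:
import pandas as pd

def locations_frame(loclist):
  # Build the locations DataFrame in one shot instead of row-by-row appends.
  records = []
  for i in loclist.get('locations', []):
    records.append({
        'locationId': i['name'],
        'StoreID': i.get('storeCode'),
        'Street': i['address']['addressLines'][0],
        'PostalCode': i['address']['postalCode'],
        'City': i['address']['locality'],
        'CountryCode': i['address'].get('regionCode'),
        'PrimaryPhone': i.get('primaryPhone'),
    })
  return pd.DataFrame(records)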
def gaRealtimeJob():
  try:
    # Shift the current time by +5:30 (apparently converting a UTC clock to
    # IST).
    gaDate = datetime.datetime.now() + datetime.timedelta(hours=5)
    gaDate = gaDate + datetime.timedelta(minutes=30)
    logger.info(gaDate)
    service, flags = sample_tools.init(
        sys.argv, 'analytics', 'v3', __doc__, __file__,
        scope='https://www.googleapis.com/auth/analytics.readonly')
    result = service.data().realtime().get(
        ids='ga:xxx', metrics='rt:activeUsers').execute()
    androidActiveUsers = getActiveUsers(result)
    result = service.data().realtime().get(
        ids='ga:yyy', metrics='rt:activeUsers').execute()
    webActiveUsers = getActiveUsers(result)
    logger.info('androidActiveUsers: ' + androidActiveUsers +
                ', webActiveUsers: ' + webActiveUsers)

    jsonData = {}
    with open('dataFile') as json_file:
      jsonData = json.load(json_file)
    # Default to '0' in case either key is missing from the file.
    oldAndroidActiveUsers = '0'
    oldWebActiveUsers = '0'
    for key, value in jsonData[0].items():
      if key == 'androidActiveUsers':
        oldAndroidActiveUsers = value
      if key == 'webActiveUsers':
        oldWebActiveUsers = value
    logger.info('oldAndroidActiveUsers: ' + oldAndroidActiveUsers +
                ', oldWebActiveUsers: ' + oldWebActiveUsers)

    # Update the stored data if the current count exceeds the old count.
    if int(androidActiveUsers) > int(oldAndroidActiveUsers):
      jsonData[0]['androidActiveUsers'] = androidActiveUsers
      jsonData[0]['androidTime'] = gaDate.strftime("%d-%m-%Y %H:%M:%S")
    if int(webActiveUsers) > int(oldWebActiveUsers):
      jsonData[0]['webActiveUsers'] = webActiveUsers
      jsonData[0]['webTime'] = gaDate.strftime("%d-%m-%Y %H:%M:%S")

    currentHour = gaDate.strftime("%H")
    logger.info(currentHour)
    mailSentFile = 'mailFlag'
    if currentHour in mailSentHrs:
      if not os.path.exists(mailSentFile):
        logger.info("Sending Mail")
        sendMail(json.dumps(jsonData))
        open(mailSentFile, 'w')
        jsonData[0]['androidActiveUsers'] = '0'
        jsonData[0]['androidTime'] = '0'
        jsonData[0]['webActiveUsers'] = '0'
        jsonData[0]['webTime'] = '0'
    else:
      logger.info("not sending mail")
      if os.path.exists(mailSentFile):
        os.remove(mailSentFile)

    # Write the updated data back to the file.
    with open('dataFile', 'w') as outfile:
      json.dump(jsonData, outfile)
  # except (TypeError, error):
  #   # Handle errors in constructing a query.
  #   logger.error('There was an error in constructing your query : %s' % error)
  # except (HttpError, error):
  #   # Handle API errors.
  #   logger.error('Arg, there was an API error : %s : %s' %
  #                (error.resp.status, error._get_reason()))
  except socket_error as serr:
    logger.error('Socket error with error number: %s' % (str(serr.errno)))
  except:
    logger.exception('Exception while fetching GA')
    raise
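# The key-scanning loop above can be reduced to direct dictionary lookups
# with defaults; a minimal sketch assuming the same dataFile structure:
def get_old_counts(data):
  # Same result as the loop: read both counters, defaulting to '0' when a
  # key is absent.
  return (data[0].get('androidActiveUsers', '0'),
          data[0].get('webActiveUsers', '0'))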
def main(argv):
  # Use the discovery doc to build a service that we can use to make
  # MyBusiness API calls, and authenticate the user so we can access their
  # account.
  service, flags = sample_tools.init(
      argv, "mybusiness", "v4", __doc__, __file__,
      scope="https://www.googleapis.com/auth/business.manage",
      discovery_filename=discovery_doc)

  # Get the list of accounts the authenticated user has access to.
  output = service.accounts().list().execute()
  firstAccount = output["accounts"][0]["name"]

  # TODO: take the value of the last executed date from the DB.
  last_executed_date = "2020-02-15 10:48:55"
  lid = "7760562933001019237"

  body = {
      "locationNames": [
          "accounts/108466677369484329492/locations/7760562933001019237"
      ],
      "basicRequest": {
          "metricRequests": [
              {"metric": "QUERIES_DIRECT"},
              {"metric": "QUERIES_INDIRECT"},
              {"metric": "QUERIES_CHAIN"},
              {"metric": "ACTIONS_WEBSITE"},
              {"metric": "ACTIONS_PHONE"},
              {"metric": "VIEWS_SEARCH"},
          ],
          "timeRange": {
              "startTime": "2020-04-02T00:00:00Z",
              "endTime": "2020-04-09T00:00:00Z"
          }
      }
  }

  locationsList = service.accounts().locations().list(
      parent=firstAccount).execute()
  l2 = locationsList
  # print(l2["locations"][0]["locationName"])

  locationInsights = service.accounts().locations().reportInsights(
      name=firstAccount, body=body).execute()
  # print(json.dumps(locationInsights, indent=2) + "\n")

  metricValList = locationInsights['locationMetrics'][0]['metricValues']
  queriesIndirect = 0
  queriesChain = 0
  for valObject in metricValList:
    print('Metric: ' + valObject['metric'] + '\tValue: ' +
          valObject['totalValue']['value'])
    if valObject['metric'] == 'QUERIES_CHAIN':
      queriesChain = int(valObject['totalValue']['value'])
    if valObject['metric'] == 'QUERIES_INDIRECT':
      queriesIndirect = int(valObject['totalValue']['value'])

  # Report indirect minus chain queries as the "category" count.
  print('QUERIES CATEGORY : ' + str(queriesIndirect - queriesChain))
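# 'last_executed_date' above is read but never used. A hypothetical sketch of
# deriving the reportInsights time range from it instead of hard-coding the
# timestamps (assumes the "%Y-%m-%d %H:%M:%S" format shown above):
from datetime import datetime, timezone

def time_range_since(last_executed_date):
  # Build a basicRequest timeRange spanning the last run to now (UTC).
  last_run = datetime.strptime(last_executed_date, '%Y-%m-%d %H:%M:%S')
  return {
      'startTime': last_run.strftime('%Y-%m-%dT%H:%M:%SZ'),
      'endTime': datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ'),
  }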
def main(argv):
  lService, lFlags = sample_tools.init(
      argv, 'webmasters', 'v3', __doc__, __file__, parents=[argparser],
      scope='https://www.googleapis.com/auth/webmasters.readonly')

  lResponse = lService.sites().list().execute()
  lRows = lResponse['siteEntry']

  # Read the ignore list.
  lIgnoreList = []
  if os.path.isfile(lFlags.ignore_list):
    lIgnoreList = [line.rstrip('\n') for line in open(lFlags.ignore_list)]
  else:
    print("No Ignore List... continuing")

  lURLList = []
  lKeyFile = open('Temp/URLs.txt', "w")
  lClickFile = open('Temp/Click.txt', "w")
  lImpFile = open('Temp/Imp.txt', "w")
  lCTRFile = open('Temp/CTR.txt', "w")
  lPosFile = open('Temp/Pos.txt', "w")

  # TODO: Auto-sort rows? We want root sites (like http://adobe.com/) to come
  # later than sub-sites (http://adobe.com/jp/).
  for lRow in lRows:
    if 'siteUnverifiedUser' not in lRow['permissionLevel']:
      if "sc-set:" not in lRow['siteUrl']:
        theSubURL = lRow['siteUrl']
        lContinue = 1
        for lStr in lIgnoreList:
          if theSubURL.startswith(lStr):
            lContinue = 0
        if lContinue:
          # Ignore this site's URLs when processing later (parent) sites.
          lIgnoreList.append(theSubURL)
          # TODO: Batch request this?
          lRequest = {
              'startDate': lFlags.start_date,
              'endDate': lFlags.end_date,
              'dimensions': ['page'],
              'rowLimit': 5000
          }
          lResponse = execute_request(lService, theSubURL, lRequest)
          print(theSubURL)
          if 'rows' in lResponse:
            for lRow in lResponse['rows']:
              aKey = 'N/A'
              if 'keys' in lRow:
                aKey = ','.join(lRow['keys'])
              if aKey not in lURLList:  # Skip duplicates.
                lKeyFile.write(aKey + '\n')
                lClickFile.write(("%.0f" % lRow['clicks']) + '\n')
                lImpFile.write(("%.0f" % lRow['impressions']) + '\n')
                lCTRFile.write(("%.3f" % (lRow['ctr'] * 100)) + '%' + '\n')
                lPosFile.write(("%.9f" % lRow['position']) + '\n')
                lURLList.append(aKey)
          else:
            print('NO DATA')

  lKeyFile.close()
  lClickFile.close()
  lImpFile.close()
  lCTRFile.close()
  lPosFile.close()
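# The five parallel files above stay aligned only by line order; a hedged
# sketch of writing the same per-URL metrics as a single CSV with the
# standard csv module (the output path is illustrative):
import csv

def write_metrics_csv(rows, path='Temp/metrics.csv'):
  # One row per unique URL, mirroring the formatting used above.
  with open(path, 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['URL', 'Clicks', 'Impressions', 'CTR', 'Position'])
    for row in rows:
      writer.writerow([
          ','.join(row.get('keys', ['N/A'])),
          '%.0f' % row['clicks'],
          '%.0f' % row['impressions'],
          '%.3f%%' % (row['ctr'] * 100),
          '%.9f' % row['position'],
      ])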