def get(self): try: # Read server-side OAuth 2.0 credentials from datastore and # raise an exception if credentials not found. credentials = StorageByKeyName(CredentialsModel, USER_AGENT, 'credentials').locked_get() if not credentials or credentials.invalid: raise Exception('missing OAuth 2.0 credentials') # Authorize HTTP session with server credentials and obtain # access to prediction API client library. http = credentials.authorize(httplib2.Http()) service = build('prediction', 'v1.5', http=http) papi = service.trainedmodels() # Build prediction data (csvInstance) dynamically based on form input. vals = [] body = {'input' : {'csvInstance' : [str(self.request.get('text'))] }} # Make a prediction and return JSON results to Javascript client. ret = papi.predict(id='bullying', body=body).execute() print ret self.response.headers['Content-Type'] = 'application/javascript' self.response.out.write(json.dumps(ret)) except Exception, err: # Capture any API errors here and pass response from API back to # Javascript client embedded in a special error indication tag. err_str = str(err) if err_str[0:len(ERR_TAG)] != ERR_TAG: err_str = ERR_TAG + err_str + ERR_END self.response.out.write(err_str)
def get(self):
    """Main handler: walks the user through Google OAuth2 then LinkedIn
    OAuth1 authorization, and finally renders a sidebar of the next 5
    calendar events plus a Sentinal response for the selected event."""
    user = users.get_current_user()
    user_id = user.user_id()
    # Stored Google OAuth2 credentials for this user.
    credentials = StorageByKeyName(
        Credentials, user_id, 'credentials').get()
    # Stored LinkedIn OAuth1 token for this user.
    lCred = LinkedInCred.get_by_key_name(user_id)
    if credentials is None or credentials.invalid == True:
        # No valid Google credentials: start the OAuth2 dance.  FLOW is
        # pickled into memcache so /oauth2callback can resume it.
        callback = self.request.relative_url('/oauth2callback')
        authorize_url = FLOW.step1_get_authorize_url(callback)
        memcache.set(user_id + 'goog', pickle.dumps(FLOW))
        self.redirect(authorize_url)
    elif lCred is None:
        # Google is authorized but LinkedIn is not: run the OAuth1
        # request-token step.
        user = users.get_current_user()
        request_token_url = 'https://api.linkedin.com/uas/oauth/requestToken'
        client = oauth.Client(consumer)
        callback = self.request.relative_url('/linkedin')
        resp, content = client.request(request_token_url, "POST",
            body=urllib.urlencode({'oauth_callback': callback}))
        request_token = dict(cgi.parse_qsl(content))
        # Stash the request token so the /linkedin callback can exchange it.
        memcache.set(user_id + 'linked', pickle.dumps(request_token))
        # NOTE(review): the oauth_callback below is hard-coded to
        # localhost:8080 — confirm before deploying.
        authorize_url = 'https://api.linkedin.com/uas/oauth/authorize?oauth_token=%s&oauth_callback=%s' % (request_token['oauth_token'], 'http://localhost:8080/linkedin')
        self.redirect(authorize_url)
    else:
        # Both services authorized: pull the next 5 upcoming events.
        http = httplib2.Http()
        http = credentials.authorize(http)
        service = build("calendar", "v3", http=http)
        date = datetime.now()
        rfc_stamp = rfc3339(date)
        events = service.events().list(calendarId='primary',
                                       singleEvents=True,
                                       orderBy='startTime',
                                       maxResults=5,
                                       timeMin=rfc_stamp).execute()
        s = Sentinal(lCred.user_key, lCred.user_secret)
        sidebar = []
        getID = self.request.get('getID')
        sq = None
        # Build the sidebar list and locate the requested event's summary.
        for event in events['items']:
            if getID == event['id']:
                sq = event['summary']
            sidebar.append({'id': event['id'], 'name': event['summary']})
        if sq is None:
            # No (or unknown) getID requested: default to the first event.
            getID = sidebar[0]['id']
            sq = sidebar[0]['name']
        # Event summaries are expected to be comma-separated entity pairs.
        entities = sq.split(',')
        cache_get = memcache.get(getID)
        if cache_get:
            resp = pickle.loads(cache_get)
        else:
            if (len(entities) < 2):
                # Malformed summary: bail out with a placeholder image.
                self.redirect('http://i.imgur.com/mvXs4.png')
                return
            resp = s.createResponse(sidebar, entities[0], entities[1])
            # Cache the rendered response for 10 minutes.
            memcache.set(getID, pickle.dumps(resp), time=600)
        path = os.path.join(os.path.dirname(__file__), 'tesdex.html')
        self.response.out.write(template.render(path, resp))
def get(self):
    """Show the current user's Buzz @consumption feed, running the
    OAuth2 web-server flow first when no valid credentials are stored."""
    current_user = users.get_current_user()
    stored = StorageByKeyName(
        Credentials, current_user.user_id(), 'credentials').get()
    if stored is None or stored.invalid == True:
        # No usable credentials: build the flow, remember it in memcache
        # so the /auth_return handler can finish it, and redirect.
        flow = OAuth2WebServerFlow(
            # Visit https://code.google.com/apis/console to
            # generate your client_id, client_secret and to
            # register your redirect_uri.
            client_id='<YOUR CLIENT ID HERE>',
            client_secret='<YOUR CLIENT SECRET HERE>',
            scope='https://www.googleapis.com/auth/buzz',
            user_agent='buzz-cmdline-sample/1.0',
            domain='anonymous',
            xoauth_displayname='Google App Engine Example App')
        callback_url = self.request.relative_url('/auth_return')
        auth_url = flow.step1_get_authorize_url(callback_url)
        memcache.set(current_user.user_id(), pickle.dumps(flow))
        self.redirect(auth_url)
        return
    # Authorized: fetch the consumption activity list and render it.
    authed_http = stored.authorize(httplib2.Http())
    service = build("buzz", "v1", http=authed_http)
    activitylist = service.activities().list(
        scope='@consumption', userId='@me').execute()
    template_path = os.path.join(os.path.dirname(__file__), 'welcome.html')
    self.response.out.write(template.render(template_path, {
        'activitylist': activitylist,
        'logout': users.create_logout_url('/'),
    }))
def update_calendar(self, calendar_service): """Updates the user's calendar""" # Check if this user is an expert and has an id if self.user_id and self.is_expert: credentials = StorageByKeyName( CredentialsModel, self.user_id, 'credentials').get() if credentials is not None: if credentials.invalid: logging.error("Credentials invalid for %s" % self.email) # return try: email = self.email # Authorize takes care of refreshing an expired token http = credentials.authorize(httplib2.Http()) now = datetime.utcnow().replace(microsecond=0) tomorrow = now + timedelta(days=1) body = {} body['timeMax'] = tomorrow.isoformat() + 'Z' body['timeMin'] = now.isoformat() + 'Z' body['items'] = [{'id': email}] response = calendar_service.freebusy().query(body=body).execute(http=http) logging.info(response) if response.get('calendars') and response['calendars'].get(email) and response['calendars'][email].get('busy') and not response['calendars'][email].get('errors'): # Store the busy schedule logging.info('storing busy schedule') self.busy_time = json.dumps(response['calendars'][email]['busy']) self.put() except AccessTokenRefreshError: logging.error('AccessTokenRefreshError for user id ' + self.user_id)
def create_playlist(self): ''' Creates a new playlist on YouTube and persist it as a Playlist instance in datastore. ''' now_date = datetime.now().date() print "create_playlist start" credentials = StorageByKeyName(CredentialsModel, self.user_id, 'credentials').get() print "create_playlist got creds" http = credentials.authorize(Http()) print "create_playlist authorized creds" request = YOUTUBE.playlists().insert( part="snippet,status", body={ 'snippet': { 'title': "DailyGrooves %s" % now_date, 'description': "DailyGrooves %s" % now_date }, 'status': { 'privacyStatus': 'public' } }) response = request.execute(http=http) print "create_playlist executed req" self.playlist_id = response["id"] playlist = Playlist(id=self.playlist_id, date=datetime.now()) playlist.put() print "Playlist: http://www.youtube.com/id?list=%s" % self.playlist_id
def post(self):
    """Handle a Mirror API subscription notification.

    Verifies the shared verifyToken, loads the subscribing user's stored
    OAuth credentials, then dispatches to the location or timeline
    handler based on the notification's collection.
    """
    data = json.loads(self.request.body)

    # Returns a 403 Forbidden (authenticating will make no difference)
    if data.get('verifyToken') != 'I_AM_YOUR_FATHER':
        logging.error('Unauthorized request to the subscription endpoint.')
        return self.abort(403)

    # Get the credentials, you could also check credentials.refresh_token is not None
    self.user_id = data.get('userToken')
    credentials = StorageByKeyName(CredentialsModel, self.user_id,
                                   'credentials').get()
    if not credentials:
        logging.error('Authentication is required and has failed.')
        return self.abort(401)

    # http was previously authorized by the decorator
    self.http = credentials.authorize(httplib2.Http())

    try:
        # Handle the appropriate type of subscription
        if data.get('collection') == 'locations':
            self._handle_location(data)
        elif data.get('collection') == 'timeline':
            self._handle_timeline(data)
    except Exception as e:
        # Bug fix: the two format args must be passed individually, not
        # as a single tuple — the tuple made the logging call itself fail
        # to format ('%s: %s' expects two arguments).
        logging.error('Failed SubscriptionHandler for user_id %s: %s',
                      self.user_id, str(e))
def post(self):
    """Handle a Mirror API subscription notification.

    Verifies the shared verifyToken, loads the subscribing user's stored
    OAuth credentials, then dispatches to the location or timeline
    handler based on the notification's collection.
    """
    data = json.loads(self.request.body)

    # Returns a 403 Forbidden (authenticating will make no difference)
    if data.get('verifyToken') != 'I_AM_YOUR_FATHER':
        logging.error('Unauthorized request to the subscription endpoint.')
        return self.abort(403)

    # Get the credentials, you could also check credentials.refresh_token is not None
    self.user_id = data.get('userToken')
    credentials = StorageByKeyName(CredentialsModel, self.user_id,
                                   'credentials').get()
    if not credentials:
        logging.error('Authentication is required and has failed.')
        return self.abort(401)

    # http was previously authorized by the decorator
    self.http = credentials.authorize(httplib2.Http())

    try:
        # Handle the appropriate type of subscription
        if data.get('collection') == 'locations':
            self._handle_location(data)
        elif data.get('collection') == 'timeline':
            self._handle_timeline(data)
    except Exception as e:
        # Bug fix: the two format args must be passed individually, not
        # as a single tuple — the tuple made the logging call itself fail
        # to format ('%s: %s' expects two arguments).
        logging.error('Failed SubscriptionHandler for user_id %s: %s',
                      self.user_id, str(e))
def get(self):
    """Import 'SleepTime by.txt' from the user's Google Drive.

    Redirects through the OAuth2 flow when no valid credentials are
    stored; otherwise downloads the file's TSV contents and loads them
    into the datastore.  (Removed a leftover `print type(file_entry)`
    debug statement and dead commented-out code.)
    """
    service = None
    user = users.get_current_user()
    user_id = user.user_id()
    credentials = StorageByKeyName(
        Credentials, user_id, 'credentials').get()
    if credentials is None or credentials.invalid == True:
        # Not authorized yet: pickle the flow for /oauth2callback and
        # send the user to Google's consent page.
        callback = self.request.relative_url('/oauth2callback')
        authorize_url = FLOW.step1_get_authorize_url(callback)
        memcache.set(user_id + 'goog', pickle.dumps(FLOW))
        return self.redirect(authorize_url)
    else:
        http = httplib2.Http()
        http = credentials.authorize(http)
        service = build("drive", "v2", http=http)
        # Take the first file matching the expected name.
        file_entry = retrieve_file_by_name(service, 'SleepTime by.txt')[0]
        # Download the file data and load it into the datastore.
        tsv_data = download_file(service, file_entry)
        read_into_db(tsv_data)
        self.response.write('Hello world!')
def create_playlist(self): ''' Creates a new playlist on YouTube and persist it as a Playlist instance in datastore. ''' now_date = datetime.now().date() print "create_playlist start" credentials = StorageByKeyName( CredentialsModel, self.user_id, 'credentials').get() print "create_playlist got creds" http = credentials.authorize(Http()) print "create_playlist authorized creds" request = YOUTUBE.playlists().insert( part="snippet,status", body={'snippet': {'title': "DailyGrooves %s" % now_date, 'description': "DailyGrooves %s" % now_date}, 'status': {'privacyStatus': 'public'} } ) response = request.execute(http=http) print "create_playlist executed req" self.playlist_id = response["id"] playlist = Playlist(id=self.playlist_id, date=datetime.now()) playlist.put() print "Playlist: http://www.youtube.com/id?list=%s" % self.playlist_id
def create_playlist(self, playlistName, epochValue): ''' Creates a new playlist on YouTube with given name and persist it as a MonthlyPlaylist instance in datastore. ''' print "create_playlist start" credentials = StorageByKeyName( CredentialsModel, self.user_id, 'credentials').get() print "create_playlist got creds" http = credentials.authorize(Http()) print "create_playlist authorized creds" request = YOUTUBE.playlists().insert( part="snippet,status", body=dict( snippet=dict( title=playlistName, description="Songs added in %s" % playlistName ), status=dict( privacyStatus="public" ) ) ) response = request.execute(http=http) print "create_playlist executed req" playlist_id = response["id"] playlist = MonthlyPlaylist(id=playlist_id, name=playlistName, epochVal=epochValue, date=datetime.now(), counter=0) playlist.put() print "Playlist created: http://www.youtube.com/id?list=%s" % playlist_id self.memcache_today_playlists() return playlist
def get(self):
    """Status page for the stored credentials.

    Reports whether a refresh token is on file, fetches the authorizing
    account's task-list count, and renders navigation links.
    """
    credentials = StorageByKeyName(Credentials, "key_for_credentials",
                                   'credentials').get()
    PageOutput = ""
    PageOutput += "<br><br>"
    if not credentials or credentials.invalid or credentials.refresh_token is None:
        PageOutput += "Missing OAuth 2.0 Credentials"
    else:
        # The guard above already excludes refresh_token being None, so
        # the old "I can not find your refresh token!" branch was
        # unreachable and has been removed.
        PageOutput += "This app has someone's " + str(len(credentials.refresh_token)) + " character refresh token on file!"
        PageOutput += "<br><br>"
        http = httplib2.Http()
        http = credentials.authorize(http)
        resp, content = http.request(
            "https://www.googleapis.com/tasks/v1/users/@me/lists", "GET")
        ResultObj = json.loads(content)
        PageOutput += "The account authorizing this app has " + str(len(ResultObj['items'])) + " task list(s)."
    PageOutput += "<br><br><br>"
    PageOutput += "<a href='/oauth2authorize'>Authorize</a><br>"
    PageOutput += "<a href='/'>Check</a><br>"
    PageOutput += "<a href='/oauth2revoke'>Revoke</a><br>"
    self.response.out.write(PageOutput)
def post(self):
    """If the given calendar has an all-day event starting today, e-mail
    the calendar owner their run for the day (only when their
    notification setting is 'Yes')."""
    calendaruserid = self.request.get('calendaruserid')
    calendarid = self.request.get('calendarid')
    creds = StorageByKeyName(Credentials, calendaruserid, 'credentials').get()
    http = httplib2.Http()
    http = creds.authorize(http)
    # NOTE(review): the developerKey is hard-coded; consider moving it
    # to configuration.
    service = build(serviceName='calendar', version='v3', http=http,
                    developerKey='AIzaSyD51wdv-kO02p29Aog7OXmL2eEG0F5ngZM')
    events = service.events().list(calendarId=calendarid).execute()
    for event in events['items']:
        # All-day events carry 'start.date'; compare against today.
        if str(datetime.date.today()) == event['start']['date']:
            # Look up the owner's profile, requiring notifications on.
            filterdiclist = [{'operator' : 'userid = ', 'value' : calendaruserid}, {'operator' : 'notificationsetting = ', 'value' : 'Yes'}]
            query = dbQuery()
            user_list = query.get_results(userProfile, 1, filterdiclist)
            mail.send_mail(sender="*****@*****.**",
                           to=user_list[0].emailaddress,
                           subject="Here is your run today!",
                           body=""" Dear """ + user_list[0].firstname + """ """ + user_list[0].lastname + """ Your run today is the following: """ + event['summary'] + """ Thanks! The race date setter app!""")
def get_posts(self, migration, scan_url=None):
    """Fetches a page of posts.

    Args:
      migration: Migration
      scan_url: string, the API URL to fetch the current page of posts.
        If None, starts at the beginning.

    Returns:
      (posts, next_scan_url). posts is a sequence of Migratables.
      next_scan_url is a string, the API URL to use for the next scan, or
      None if there is nothing more to scan.
    """
    # get this user's OAuth credentials
    credentials = StorageByKeyName(CredentialsModel, self.gae_user_id,
                                   "credentials").get()
    if not credentials:
        logging.error("Giving up: credentials not found for user id %s.",
                      self.gae_user_id)
        self.error(299)
        return

    # fetch the json stream and convert it to atom.
    # (if i use collection 'user' instead of 'public', that would get *all*
    # posts, not just public posts, but that's not allowed yet. :/ )
    resp = (
        json_service.activities()
        .list(userId="me", collection="public")
        .execute(credentials.authorize(httplib2.Http()))
    )

    posts = []
    for post in resp["items"]:
        # Renamed from `id` so the builtin is not shadowed.
        post_id = post["id"]
        app = post.get("source")
        if app and app in APPLICATION_BLACKLIST:
            # Bug fix: post ids are strings, so the old "%d" made the
            # logging call itself fail to format; use %s.
            logging.info("Skipping post %s", post_id)
            continue
        posts.append(GooglePlusPost(
            key_name_parts=(str(post_id), migration.key().name()),
            json_data=json.dumps(post)))

    # TODO: derive the next page's scan URL from the response paging info.
    next_scan_url = None
    return posts, next_scan_url
def get_posts(self, migration, scan_url=None):
    """Fetches a page of posts.

    Args:
      migration: Migration
      scan_url: string, the API URL to fetch the current page of posts.
        If None, starts at the beginning.

    Returns:
      (posts, next_scan_url). posts is a sequence of Migratables.
      next_scan_url is a string, the API URL to use for the next scan, or
      None if there is nothing more to scan.
    """
    # get this user's OAuth credentials
    credentials = StorageByKeyName(CredentialsModel, self.gae_user_id,
                                   'credentials').get()
    if not credentials:
        logging.error('Giving up: credentials not found for user id %s.',
                      self.gae_user_id)
        self.error(299)
        return

    # fetch the json stream and convert it to atom.
    # (if i use collection 'user' instead of 'public', that would get *all*
    # posts, not just public posts, but that's not allowed yet. :/ )
    resp = json_service.activities().list(userId='me', collection='public')\
        .execute(credentials.authorize(httplib2.Http()))

    posts = []
    for post in resp['items']:
        # Renamed from `id` so the builtin is not shadowed.
        post_id = post['id']
        app = post.get('source')
        if app and app in APPLICATION_BLACKLIST:
            # Bug fix: post ids are strings, so the old '%d' made the
            # logging call itself fail to format; use %s.
            logging.info('Skipping post %s', post_id)
            continue
        posts.append(GooglePlusPost(
            key_name_parts=(str(post_id), migration.key().name()),
            json_data=json.dumps(post)))

    # TODO: derive the next page's scan URL from the response paging info.
    next_scan_url = None
    return posts, next_scan_url
def get(self):
    """Runs stored prediction queries in batch.

    On first visit, starts an OAuth2 flow requesting offline access so a
    refresh token is issued; afterwards sends each query read from
    query.csv to the trained model and writes the results as plain text.
    """
    # check if credential already setup
    credentials = StorageByKeyName(CredentialsModel, USER_AGENT,
                                   'credentials').locked_get()
    if not credentials or credentials.invalid:
        # First run: offline access + forced approval prompt so Google
        # issues a refresh token.
        flow = OAuth2WebServerFlow(
            client_id=client_id,
            client_secret=client_secret,
            scope=scope,
            user_agent=USER_AGENT,
            access_type = 'offline',
            approval_prompt='force',
            redirect_uri=self.request.relative_url('/auth_return')
        )
        authorize_url = flow.step1_get_authorize_url()
        # Pickle the flow so the /auth_return handler can finish it.
        memcache.set('a', pickle.dumps(flow))
        self.redirect(authorize_url)
    else:
        try:
            #"2nd around after getting auth, now do the query"
            print("2nd around after getting auth, now do the query\n")
            # read query.csv returns list of query bodys
            bodyitems = readquery('query.csv')
            print("query lines: " + json.dumps(bodyitems) + '\n')
            http = credentials.authorize(httplib2.Http())
            service = build('prediction', 'v1.6', http=http)
            for items in bodyitems:
                result = service.trainedmodels().predict(
                    project=projid, id=modelid, body=items).execute()
                querystring = json.dumps(items['input']['csvInstance'])
                # A prediction result carries either a numeric
                # outputValue or a categorical outputLabel.
                if 'outputValue' in result:
                    resultitem = json.dumps(result['outputValue'])
                else:
                    resultitem = json.dumps(result['outputLabel'])
                response_result = 'Result: ' + resultitem
                # cant write in appengine
                outputdata(result, 'output')
                print(querystring + " " + response_result + '\n')
                self.response.headers['Content-Type'] = 'text/plain'
                self.response.out.write(querystring + " " + response_result + '\n')
        except Exception, err:
            err_str = str(err)
            self.response.out.write(err_str)
def build_service(self):
    """Return an authorized v1 client for self.SERVICE, or redirect to
    the auth handler (returning None) when no credentials are stored."""
    current = users.get_current_user()
    storage_key = current.user_id() + '-' + self.SERVICE
    creds = StorageByKeyName(Credentials, storage_key, 'credentials').get()
    if creds is None:
        # Not yet authorized: bounce to the auth handler, passing the
        # service (and optional scope) as query parameters.
        auth_url = self.request.relative_url('auth')
        auth_url += '?service=' + self.SERVICE
        if hasattr(self, 'SCOPE'):
            auth_url += '&scope=' + self.SCOPE
        self.redirect(auth_url)
        return
    authed_http = creds.authorize(httplib2.Http())
    return build(self.SERVICE, 'v1', http=authed_http)
def call_with_creds(cls, gae_user_id, endpoint, **kwargs):
    """Makes a Google+ API call with a user's stored credentials.

    Args:
      gae_user_id: string, App Engine user id used to retrieve the
        CredentialsModel that stores the user credentials for this call
      endpoint: string, 'RESOURCE.METHOD', e.g. 'Activities.list'

    Returns: dict
    """
    stored = StorageByKeyName(CredentialsModel, gae_user_id,
                              'credentials').get()
    assert stored, 'Credentials not found for user id %s' % gae_user_id
    authed_http = stored.authorize(cls.http)
    return cls.call(authed_http, endpoint, **kwargs)
def post(self):
    """Makes a prediction with the user-selected model and returns the
    JSON result to the Javascript client; any error is wrapped in the
    ERR_TAG/ERR_END markers the client looks for."""
    try:
        # Read server-side OAuth 2.0 credentials from datastore and
        # raise an exception if credentials not found.
        credentials = StorageByKeyName(CredentialsModel, USER_AGENT,
                                       'credentials').locked_get()
        if not credentials or credentials.invalid:
            raise Exception('missing OAuth 2.0 credentials')

        # Authorize HTTP session with server credentials and obtain
        # access to prediction API client library.
        http = credentials.authorize(httplib2.Http())
        service = build('prediction', 'v1.6', http=http)

        # Read and parse JSON model description data.
        models = parse_json_file(MODELS_FILE)

        # Get reference to user's selected model.
        model_name = self.request.get('model')
        model = models[model_name]

        # Build prediction data (csvInstance) dynamically based on form input.
        vals = []
        for field in model['fields']:
            label = field['label']
            val = self.request.get(label).encode('utf-8')
            vals.append(val)
        body = {'input': {'csvInstance': vals}}
        logging.info('model:' + model_name + ' body:' + str(body))

        # Make a prediction and return JSON results to Javascript client.
        # Hosted and trained models live behind different API endpoints.
        if model['type'] == 'hosted':
            ret = service.hostedmodels().predict(
                project=model['project'],
                hostedModelName=model['hostedModelName'],
                body=body).execute()
        if model['type'] == 'trained':
            ret = service.trainedmodels().predict(id=model['id'],
                                                  project=model['project'],
                                                  body=body).execute()
        self.response.out.write(json.dumps(ret))
    except Exception, err:
        # Capture any API errors here and pass response from API back to
        # Javascript client embedded in a special error indication tag.
        err_str = str(err)
        if err_str[0:len(ERR_TAG)] != ERR_TAG:
            err_str = ERR_TAG + err_str + ERR_END
        self.response.out.write(err_str)
def post(self):
    """Record an (x, y) point: builds an INSERT statement timestamped
    with the current UTC time and POSTs it to the SQL query endpoint;
    unauthorized users are redirected to '/'."""
    current = users.get_current_user()
    creds = StorageByKeyName(
        Credentials, current.user_id(), 'credentials').get()
    if creds is None or creds.invalid == True:
        self.redirect("/")
        return
    authed_http = creds.authorize(httplib2.Http())
    x = self.request.get("x")
    y = self.request.get("y")
    stamp = datetime.utcnow().isoformat(' ')[:19]
    statement = insertSQL % (stamp, x, y)
    (response, content) = authed_http.request(
        queryURL % urllib.quote(statement), "POST")
    # NOTE(review): response['status'] is a string on httplib2 responses;
    # confirm set_status accepts it on this webapp version.
    self.response.set_status(response['status'])
    self.response.out.write(content)
def insert_videos(self): '''Inserts the instance videos into the instance YouTube playlist.''' credentials = StorageByKeyName(CredentialsModel, self.user_id, 'credentials').get() http = credentials.authorize(Http()) print "Adding videos:" nb_videos_inserted = 0 for video in self.videos: if (nb_videos_inserted >= YOUTUBE_MAX_VIDEOS_PER_PLAYLIST): break else: body_add_video = { 'snippet': { 'playlistId': self.playlist_id, 'resourceId': { 'kind': "youtube#video", 'videoId': video } } } try: request = YOUTUBE.playlistItems().insert( part=",".join(body_add_video.keys()), body=body_add_video) request.execute(http=http) print " %s: %s ..." % (nb_videos_inserted, video) nb_videos_inserted += 1 # https://cloud.google.com/appengine/articles/deadlineexceedederrors except HttpError: print " %s: KO, inserting %s failed" % \ (nb_videos_inserted, video) except DeadlineExceededError: print " %s: KO, inserting %s failed with DEE" % \ (nb_videos_inserted, video) except AccessTokenRefreshError: print " %s: KO, access token refresh error on %s" % \ (nb_videos_inserted, video) except: print " %s KO, other exception on %s" % \ (nb_videos_inserted, video) # Seems required to avoid YT-thrown exception, # and might help with DeadlineExceededError sleep(2)
def get(self):
    """Revokes the stored refresh token and redirects to the root page."""
    PageOutput = ""
    credentials = StorageByKeyName(Credentials, "key_for_credentials",
                                   'credentials').get()
    # Clear the stored credentials.
    # NOTE(review): put(None) on a Storage object — confirm this actually
    # deletes/clears the stored entity rather than raising.
    StorageByKeyName(Credentials, "key_for_credentials", 'credentials').put(None)
    PageOutput = ""
    PageOutput += "<br><br>"
    if not credentials or credentials.invalid or credentials.refresh_token is None:
        PageOutput += "No Credentials"
        pass
    else:
        http = httplib2.Http()
        http = credentials.authorize(http)
        # Ask Google to revoke the refresh token server-side.
        RevokeURL = "https://accounts.google.com/o/oauth2/revoke?token=" + str(credentials.refresh_token)
        resp, content = http.request(RevokeURL, "GET")
        PageOutput += "Bye. Bye. Bye."
    # NOTE(review): PageOutput is accumulated but never written — the
    # handler always redirects, so the text above is effectively dead.
    self.redirect("/")
def get(self):
    # NOTE(review): this handler appears to have been corrupted by a
    # credential-redaction pass (the "*****" below): the tail of the
    # OAuth2WebServerFlow(...) call, the redirect, the else branch
    # header, and the definition of `user_id` were fused/lost.  It is
    # reproduced as found; reconstruct from version control before
    # editing — do not "fix" it by guessing.
    user = users.get_current_user()
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if credentials is None or credentials.invalid == True:
        flow = OAuth2WebServerFlow(
            # Visit https://code.google.com/apis/console to
            # generate your client_id, client_secret and to
            # register your redirect_uri.
            client_id=discovery.CLIENT_ID, client_secret=discovery.CLIENT_SECRET, scope='https://www.googleapis.com/auth/buzz', user_agent='buzz-cmdline-sample/1.0', domain='http://*****:*****@me' http = httplib2.Http() http = credentials.authorize(http)
    # Fetch the user's own feed plus follower/following groups, and
    # assemble a small debug string with a count of self posts.
    selflist = self._GetList(http, scope='@self', userId=user_id)
    followerslist = self._GetGroup(http, userId=user_id, groupId='@followers')
    followinglist = self._GetGroup(http, userId=user_id, groupId='@following')
    debug_str = user_id + ', '
    k, v = self._Count(selflist)
    debug_str += '(%d, re: %d), ' % (k, v)
    path = os.path.join(os.path.dirname(__file__), 'welcome.html')
    logout = users.create_logout_url('/')
    self.response.out.write(
        template.render(
            path,
            {'selflist': selflist,
             'followerslist': followerslist,
             'followinglist': followinglist,
             'debug_str': debug_str,
             'logout': logout}))
def post(self):
    """Insert an (x, y) sample via the SQL endpoint, timestamped with
    the current UTC time; unauthorized users are sent back to '/'."""
    user = users.get_current_user()
    creds = StorageByKeyName(Credentials, user.user_id(),
                             'credentials').get()
    if creds is None or creds.invalid == True:
        self.redirect("/")
    else:
        authed = creds.authorize(httplib2.Http())
        statement = insertSQL % (datetime.utcnow().isoformat(' ')[:19],
                                 self.request.get("x"),
                                 self.request.get("y"))
        (response, content) = authed.request(
            queryURL % urllib.quote(statement), "POST")
        self.response.set_status(response['status'])
        self.response.out.write(content)
def insert_videos(self): '''Inserts the instance videos into the instance YouTube playlist.''' credentials = StorageByKeyName( CredentialsModel, self.user_id, 'credentials').get() http = credentials.authorize(Http()) print "Adding videos:" nb_videos_inserted = 0 for video in self.videos: if (nb_videos_inserted >= YOUTUBE_MAX_VIDEOS_PER_PLAYLIST): break else: body_add_video = {'snippet': { 'playlistId': self.playlist_id, 'resourceId': {'kind': "youtube#video", 'videoId': video} } } try: request = YOUTUBE.playlistItems().insert( part=",".join(body_add_video.keys()), body=body_add_video ) request.execute(http=http) print " %s: %s ..." % (nb_videos_inserted, video) nb_videos_inserted += 1 # https://cloud.google.com/appengine/articles/deadlineexceedederrors except HttpError: print " %s: KO, inserting %s failed" % \ (nb_videos_inserted, video) except DeadlineExceededError: print " %s: KO, inserting %s failed with DEE" % \ (nb_videos_inserted, video) except AccessTokenRefreshError: print " %s: KO, access token refresh error on %s" % \ (nb_videos_inserted, video) except: print " %s KO, other exception on %s" % \ (nb_videos_inserted, video) # Seems required to avoid YT-thrown exception, # and might help with DeadlineExceededError sleep(2)
def post(self):
    """Makes a prediction with the user-selected model and returns the
    JSON result to the Javascript client; any error is wrapped in the
    ERR_TAG/ERR_END markers the client looks for."""
    try:
        # Read server-side OAuth 2.0 credentials from datastore and
        # raise an exception if credentials not found.
        credentials = StorageByKeyName(CredentialsModel, USER_AGENT,
                                       'credentials').locked_get()
        if not credentials or credentials.invalid:
            raise Exception('missing OAuth 2.0 credentials')

        # Authorize HTTP session with server credentials and obtain
        # access to prediction API client library.
        http = credentials.authorize(httplib2.Http())
        service = build('prediction', 'v1.6', http=http)

        # Read and parse JSON model description data.
        models = parse_json_file(MODELS_FILE)

        # Get reference to user's selected model.
        model_name = self.request.get('model')
        model = models[model_name]

        # Build prediction data (csvInstance) dynamically based on form input.
        vals = []
        for field in model['fields']:
            label = field['label']
            val = self.request.get(label).encode('utf-8')
            vals.append(val)
        body = {'input' : {'csvInstance' : vals }}
        logging.info('model:' + model_name + ' body:' + str(body))

        # Make a prediction and return JSON results to Javascript client.
        # Hosted and trained models live behind different API endpoints.
        if model['type'] == 'hosted':
            ret = service.hostedmodels().predict(project=model['project'],
                hostedModelName=model['hostedModelName'],
                body=body).execute()
        if model['type'] == 'trained':
            ret = service.trainedmodels().predict(id=model['id'],
                project=model['project'],
                body=body).execute()
        self.response.out.write(json.dumps(ret))
    except Exception, err:
        # Capture any API errors here and pass response from API back to
        # Javascript client embedded in a special error indication tag.
        err_str = str(err)
        if err_str[0:len(ERR_TAG)] != ERR_TAG:
            err_str = ERR_TAG + err_str + ERR_END
        self.response.out.write(err_str)
def get(self):
    """Greet the user with their Buzz follower count, starting the OAuth
    flow first when no valid credentials are stored."""
    current = users.get_current_user()
    creds = StorageByKeyName(Credentials, current.user_id(),
                             'credentials').get()
    if not creds or creds.invalid:
        return begin_oauth_flow(self, current)
    authed_http = creds.authorize(httplib2.Http())
    # Build a service object for interacting with the API. Visit
    # the Google APIs Console <http://code.google.com/apis/console>
    # to get a developerKey for your own application.
    service = build("buzz", "v1", http=authed_http)
    followers = service.people().list(
        userId='@me', groupId='@followers').execute()
    text = 'Hello, you have %s followers!' % followers['totalResults']
    page = os.path.join(os.path.dirname(__file__), 'welcome.html')
    self.response.out.write(template.render(page, {'text': text}))
def get(self):
    """Render the Dailymotion /me payload, first sending the user
    through the OAuth flow when stored credentials are missing or
    invalid."""
    current = users.get_current_user()
    creds = StorageByKeyName(Credentials, current.user_id(),
                             'credentials').get()
    if creds is None or creds.invalid == True:
        # Persist the flow so the /auth_return handler can complete it.
        cb = self.request.relative_url('/auth_return')
        auth_url = FLOW.step1_get_authorize_url(cb)
        memcache.set(current.user_id(), pickle.dumps(FLOW))
        self.redirect(auth_url)
        return
    authed_http = creds.authorize(httplib2.Http())
    resp, content = authed_http.request('https://api.dailymotion.com/me')
    page = os.path.join(os.path.dirname(__file__), 'welcome.html')
    self.response.out.write(template.render(page, {
        'content': content,
        'logout': users.create_logout_url('/'),
    }))
def get(self):
    """Greets the user with their Buzz follower count; users without
    valid stored credentials are sent into the OAuth flow instead."""
    user = users.get_current_user()
    creds = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if not creds or creds.invalid:
        return begin_oauth_flow(self, user)
    http = creds.authorize(httplib2.Http())
    # Build a service object for interacting with the API. Visit
    # the Google APIs Console <http://code.google.com/apis/console>
    # to get a developerKey for your own application.
    buzz = build("buzz", "v1", http=http)
    result = buzz.people().list(userId='@me',
                                groupId='@followers').execute()
    greeting = 'Hello, you have %s followers!' % result['totalResults']
    tpl = os.path.join(os.path.dirname(__file__), 'welcome.html')
    self.response.out.write(template.render(tpl, {'text': greeting}))
def get(self):
    """Display the Dailymotion /me response, redirecting into the OAuth
    flow when no valid credentials are on file."""
    current = users.get_current_user()
    creds = StorageByKeyName(
        Credentials, current.user_id(), 'credentials').get()
    if creds is None or creds.invalid == True:
        # No callback argument here: the flow's preconfigured
        # redirect_uri is used.
        self.redirect(FLOW.step1_get_authorize_url())
        return
    authed_http = creds.authorize(httplib2.Http())
    resp, content = authed_http.request('https://api.dailymotion.com/me')
    page = os.path.join(os.path.dirname(__file__), 'welcome.html')
    ctx = {'content': content, 'logout': users.create_logout_url('/')}
    self.response.out.write(template.render(page, ctx))
def insert_videos(self, playlist_id, videos): '''Inserts the instance videos into the instance YouTube playlist.''' credentials = StorageByKeyName( CredentialsModel, self.user_id, 'credentials').get() http = credentials.authorize(Http()) print "Adding videos to playlist %s :" % playlist_id nb_videos_inserted = 0 for video in videos: if (nb_videos_inserted >= YOUTUBE_MAX_VIDEOS_PER_PLAYLIST): break else: body_add_video = dict( snippet=dict( playlistId=playlist_id, resourceId=dict( kind="youtube#video", videoId=video ) ) ) try: request = YOUTUBE.playlistItems().insert( part=",".join(body_add_video.keys()), body=body_add_video ) request.execute(http=http) print " %s: %s ..." % (nb_videos_inserted, video) nb_videos_inserted += 1 except HttpError: print " %s: KO, insertion of %s failed" % \ (nb_videos_inserted, video) except AccessTokenRefreshError: print " %s: KO, access token refresh error on %s" % \ (nb_videos_inserted, video) sleep(0.1) # seems required to avoid YT-thrown exception
def get(self):
    """Dailymotion OAuth2 demo: authorizes on first visit, then fetches
    /me both with an explicit access_token query parameter and via an
    authorized Http object, rendering both responses side by side."""
    user = users.get_current_user()
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if credentials is None or credentials.invalid == True:
        # SECURITY NOTE(review): client_id/client_secret are hard-coded
        # here; move them to configuration before shipping.
        flow = OAuth2WebServerFlow(
            client_id='2ad565600216d25d9cde',
            client_secret='03b56df2949a520be6049ff98b89813f17b467dc',
            scope='read',
            user_agent='oauth2client-sample/1.0',
            auth_uri='https://api.dailymotion.com/oauth/authorize',
            token_uri='https://api.dailymotion.com/oauth/token'
        )
        callback = self.request.relative_url('/auth_return')
        authorize_url = flow.step1_get_authorize_url(callback)
        # Pickle the flow so /auth_return can complete the exchange.
        memcache.set(user.user_id(), pickle.dumps(flow))
        self.redirect(authorize_url)
    else:
        http = httplib2.Http()
        # First request: token passed explicitly in the query string.
        resp, content1 = http.request('https://api.dailymotion.com/me?access_token=%s' % credentials.access_token)
        http = credentials.authorize(http)
        # Second request: same endpoint via the authorized Http object.
        resp, content2 = http.request('https://api.dailymotion.com/me')
        path = os.path.join(os.path.dirname(__file__), 'welcome.html')
        logout = users.create_logout_url('/')
        self.response.out.write(
            template.render(
                path,
                {'content1': content1,
                 'content2': content2,
                 'logout': logout}))
def get(self):
    """Show the current user's Buzz consumption activities.

    Redirects to the OAuth authorization URL (stashing the pickled FLOW in
    memcache keyed by user id) when stored credentials are missing or
    invalid; otherwise builds the Buzz v1 service and renders the
    activity list into welcome.html.
    """
    user = users.get_current_user()
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    # Idiom fix: truthiness instead of '== True'.
    if credentials is None or credentials.invalid:
        callback = self.request.relative_url('/oauth2callback')
        authorize_url = FLOW.step1_get_authorize_url(callback)
        # Persist the flow so /oauth2callback can complete the exchange.
        memcache.set(user.user_id(), pickle.dumps(FLOW))
        self.redirect(authorize_url)
    else:
        http = httplib2.Http()
        http = credentials.authorize(http)
        service = build("buzz", "v1", http=http)
        activities = service.activities()
        activitylist = activities.list(
            scope='@consumption', userId='@me').execute()
        path = os.path.join(os.path.dirname(__file__), 'welcome.html')
        logout = users.create_logout_url('/')
        self.response.out.write(
            template.render(path, {
                'activitylist': activitylist,
                'logout': logout
            }))
def get(self):
    """Show the current user's Buzz consumption activities.

    Redirects to the OAuth authorization URL (stashing the pickled FLOW in
    memcache keyed by user id) when stored credentials are missing or
    invalid; otherwise builds the Buzz v1 service and renders the
    activity list into welcome.html.
    """
    user = users.get_current_user()
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    # Idiom fix: truthiness instead of '== True'.
    if credentials is None or credentials.invalid:
        callback = self.request.relative_url('/oauth2callback')
        authorize_url = FLOW.step1_get_authorize_url(callback)
        # Persist the flow so /oauth2callback can complete the exchange.
        memcache.set(user.user_id(), pickle.dumps(FLOW))
        self.redirect(authorize_url)
    else:
        http = httplib2.Http()
        http = credentials.authorize(http)
        service = build("buzz", "v1", http=http)
        activities = service.activities()
        activitylist = activities.list(
            scope='@consumption', userId='@me').execute()
        path = os.path.join(os.path.dirname(__file__), 'welcome.html')
        logout = users.create_logout_url('/')
        self.response.out.write(
            template.render(
                path,
                {'activitylist': activitylist,
                 'logout': logout
                 }))
def getAuth(userId):
    """Return an httplib2.Http object authorized with the stored OAuth
    credentials for the given user id."""
    stored = StorageByKeyName(
        default.CredentialsModel, userId, "credentials").get()
    return stored.authorize(httplib2.Http())
def post(self):
    """Create a Google Calendar for the selected training program and fill
    it with one event per day, working backwards from the race date.

    Form fields read: 'program' (program name), 'racedate' (m/d/Y),
    'calsummery' (calendar title), 'location' (used as both the event
    location and the calendar timezone — the iframe's ctz parameter
    confirms it is a timezone string).
    """
    user = users.get_current_user()
    creds = StorageByKeyName(Credentials, user.user_id(), 'credentials').get()
    http = httplib2.Http()
    http = creds.authorize(http)
    service = build(serviceName='calendar', version='v3', http=http,
                    developerKey='AIzaSyD51wdv-kO02p29Aog7OXmL2eEG0F5ngZM')
    programname = self.request.get('program')
    racedate = self.request.get('racedate')
    calsummery = self.request.get('calsummery')
    callocation = self.request.get('location')
    racedate = datetime.datetime.strptime(racedate, '%m/%d/%Y')
    racedate = racedate.date()
    programQuery = db.Query(Program)
    programQuery = programQuery.filter('programName = ', programname)
    results = programQuery.fetch(limit=1)
    # Had some difficuty building doing a POST to the REST API, will just
    # use the provided python API instead
    newcal = {
        'summary': calsummery,
        # BUG FIX: the Calendar v3 API field is 'timeZone'; the previous
        # 'timezone' key was silently ignored by the API.
        'timeZone': callocation
    }
    try:
        created_calendar = service.calendars().insert(body=newcal).execute()
        storedcalendar = calendarHTML(
            userid=user.user_id(),
            calendarId=created_calendar['id'],
            calendarName=created_calendar['summary'],
            calendarTimeZone=callocation,
            calendarHTML='<iframe src="https://www.google.com/calendar/embed?src='
                         + created_calendar['id'] + '&ctz=' + callocation
                         + '" style="border:0" width="800" height="600" frameborder="0" scrolling="no"></iframe>')
        storedcalendar.put()
    except (DeadlineExceededError, HttpError):
        doRender(self, 'index.htm',
                 {'error': 'There was an error on submit, please try again'})
        # BUG FIX: without this return, 'created_calendar' was undefined
        # below and the handler crashed with a NameError after rendering
        # the error page.
        return
    if len(results) > 0:
        programReturned = results[0]
        programWeeksQuery = db.Query(programWeeks)
        programWeeksQuery.filter('program = ', programReturned.key())
        programWeeksQuery.order('week')
        results = programWeeksQuery.fetch(limit=100)
        # would like to figure out how to use a jquery progress bar
        # here, will user an generic spinner annimation instead
        if len(results) > 0:
            # One event per day, inserted backwards from the race date:
            # last week first, Sunday down to Monday within each week.
            # This loop replaces seven copy-pasted per-day blocks.
            day_fields = ('sunday', 'saturday', 'friday', 'thursday',
                          'wednesday', 'tuesday', 'monday')
            insert_failed = False
            for result in reversed(results):
                for day in day_fields:
                    event = {
                        'summary': getattr(result, day),
                        'location': callocation,
                        'start': {'date': str(racedate)},
                        'end': {'date': str(racedate)}
                    }
                    try:
                        service.events().insert(
                            calendarId=created_calendar['id'],
                            body=event).execute()
                    except (DeadlineExceededError, HttpError):
                        doRender(self, 'index.htm',
                                 {'error': 'There was an error on submit, please try again'})
                        insert_failed = True
                        break
                    # Step back one day only after a successful insert,
                    # matching the original per-day decrement.
                    racedate = racedate + datetime.timedelta(days=-1)
                if insert_failed:
                    break
            # BUG FIX: only render the success message when no insert
            # failed; previously both the error and success pages could
            # be written in the same response.
            if not insert_failed:
                doRender(self, 'index.htm',
                         {'programloaded': 'The program was loaded to your calendar successfully'})
        else:
            doRender(self, 'index.htm',
                     {'error': 'No weeks were returned from the database!'})
    else:
        doRender(self, 'index.htm',
                 {'error': 'No programs returned from the DB!'})
class ProcessTasksWorker(webapp2.RequestHandler): """ Process tasks according to data in the ProcessTasksJob entity """ credentials = None user_email = None is_test_user = False process_tasks_job = None tasks_svc = None tasklists_svc = None def _log_progress(self, prefix_msg=""): fn_name = "_log_progress: " if prefix_msg: logging.debug(fn_name + prefix_msg + " - Job status = '" + str(self.process_tasks_job.status) + "', progress: " + str(self.process_tasks_job.total_progress)) else: logging.debug(fn_name + "Job status = '" + str(self.process_tasks_job.status) + "', progress: " + "', progress: " + str(self.process_tasks_job.total_progress)) if self.process_tasks_job.message: logging.debug(fn_name + "Message = " + str(self.process_tasks_job.message)) if self.process_tasks_job.error_message: logging.debug(fn_name + "Error message = " + str(self.process_tasks_job.error_message)) logservice.flush() def post(self): fn_name = "ProcessTasksWorker.post(): " logging.debug(fn_name + "<start> (app version %s)" %appversion.version) logservice.flush() self.user_email = self.request.get(settings.TASKS_QUEUE_KEY_NAME) self.is_test_user = shared.is_test_user(self.user_email) if self.user_email: # Retrieve the DB record for this user self.process_tasks_job = model.ProcessTasksJob.get_by_key_name(self.user_email) if self.process_tasks_job is None: logging.error(fn_name + "No DB record for " + self.user_email) logservice.flush() logging.debug(fn_name + "<End> No DB record") # TODO: Find some way of notifying the user????? # Could use memcache to relay a message which is displayed in ProgressHandler return logging.debug(fn_name + "Retrieved process tasks job for " + str(self.user_email)) logging.debug(fn_name + "Job was requested at " + str(self.process_tasks_job.job_created_timestamp)) logservice.flush() if self.process_tasks_job.status != constants.ExportJobStatus.TO_BE_STARTED: # Very occassionally, GAE starts a 2nd instance of the worker, so we check for that here. 
# Check when job status was last updated. If it was less than settings.MAX_JOB_PROGRESS_INTERVAL # seconds ago, assume that another instance is already running, log error and exit time_since_last_update = datetime.datetime.now() - self.process_tasks_job.job_progress_timestamp if time_since_last_update.seconds < settings.MAX_JOB_PROGRESS_INTERVAL: logging.error(fn_name + "It appears that worker was called whilst another job is already running for " + str(self.user_email)) logging.error(fn_name + "Previous job requested at " + str(self.process_tasks_job.job_created_timestamp) + " UTC is still running.") logging.error(fn_name + "Previous worker started at " + str(self.process_tasks_job.job_start_timestamp) + " UTC and last job progress update was " + str(time_since_last_update.seconds) + " seconds ago, with status " + str(self.process_tasks_job.status) ) logging.warning(fn_name + "<End> (Another worker is already running)") logservice.flush() return else: # A previous job hasn't completed, and hasn't updated progress for more than # settings.MAX_JOB_PROGRESS_INTERVAL secons, so assume that previous worker # for this job has died. logging.error(fn_name + "It appears that a previous job requested by " + str(self.user_email) + " at " + str(self.process_tasks_job.job_created_timestamp) + " UTC has stalled.") logging.error(fn_name + "Previous worker started at " + str(self.process_tasks_job.job_start_timestamp) + " UTC and last job progress update was " + str(time_since_last_update.seconds) + " seconds ago, with status " + str(self.process_tasks_job.status) + ", progress = ") if self.process_tasks_job.number_of_job_starts > settings.MAX_NUM_JOB_STARTS: logging.error(fn_name + "This job has already been started " + str(self.process_tasks_job.number_of_job_starts) + " times. Giving up") logging.warning(fn_name + "<End> (Multiple job restart failures)") logservice.flush() return else: logging.info(fn_name + "Attempting to restart backup job. 
Attempt number " + str(self.process_tasks_job.number_of_job_starts + 1)) logservice.flush() self.process_tasks_job.status = constants.ExportJobStatus.INITIALISING self.process_tasks_job.number_of_job_starts = self.process_tasks_job.number_of_job_starts + 1 self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self.process_tasks_job.job_start_timestamp = datetime.datetime.now() self.process_tasks_job.message = "Validating background job ..." self._log_progress("Initialising") self.process_tasks_job.put() time_since_job_request = datetime.datetime.now() - self.process_tasks_job.job_created_timestamp logging.debug(fn_name + "Starting job that was requested " + str(time_since_job_request.seconds) + " seconds ago at " + str(self.process_tasks_job.job_created_timestamp) + " UTC") user = self.process_tasks_job.user if not user: logging.error(fn_name + "No user object in DB record for " + str(self.user_email)) logservice.flush() self.process_tasks_job.status = constants.ExportJobStatus.ERROR self.process_tasks_job.message = '' self.process_tasks_job.error_message = "Problem with user details. Please restart." self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self._log_progress("No user") self.process_tasks_job.put() logging.debug(fn_name + "<End> No user object") return # self.credentials = self.process_tasks_job.credentials # DEBUG: 2012-09-16; Trying a different method of retrieving credentials, to see if it # allows retrieveal of credentials for TAFE account self.credentials = StorageByKeyName(CredentialsModel, user.user_id(), 'credentials').get() if not self.credentials: logging.error(fn_name + "No credentials in DB record for " + str(self.user_email)) logservice.flush() self.process_tasks_job.status = constants.ExportJobStatus.ERROR self.process_tasks_job.message = '' self.process_tasks_job.error_message = "Problem with user credentials. Please restart." 
self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self._log_progress("No credentials") self.process_tasks_job.put() logging.debug(fn_name + "<End> No credentials") return if self.credentials.invalid: logging.error(fn_name + "Invalid credentials in DB record for " + str(self.user_email)) logservice.flush() self.process_tasks_job.status = constants.ExportJobStatus.ERROR self.process_tasks_job.message = '' self.process_tasks_job.error_message = "Invalid credentials. Please restart and re-authenticate." self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self._log_progress("Credentials invalid") logservice.flush() self.process_tasks_job.put() logging.debug(fn_name + "<End> Invalid credentials") return if self.is_test_user: logging.debug(fn_name + "User is test user %s" % self.user_email) logservice.flush() http = httplib2.Http() http = self.credentials.authorize(http) service = discovery.build("tasks", "v1", http=http) self.tasklists_svc = service.tasklists() self.tasks_svc = service.tasks() # ========================================= # Retrieve tasks from the Google server # ========================================= self._export_tasks() else: logging.error(fn_name + "No processing, as there was no user_email key") logservice.flush() logging.debug(fn_name + "<End>") logservice.flush() def _export_tasks(self): fn_name = "_export_tasks: " logging.debug(fn_name + "<Start>") logservice.flush() start_time = datetime.datetime.now() include_hidden = self.process_tasks_job.include_hidden include_completed = self.process_tasks_job.include_completed include_deleted = self.process_tasks_job.include_deleted summary_msg = '' # Retrieve all tasks for the user try: logging.debug(fn_name + "include_completed = " + str(include_completed) + ", include_hidden = " + str(include_hidden) + ", include_deleted = " + str(include_deleted)) logservice.flush() # ############################################## # FLOW # 
---------------------------------------------- # For each page of taskslists # For each tasklist # For each page of tasks # For each task # Fix date format # Add tasks to tasklist collection # Add tasklist to tasklists collection # Use tasklists collection to return tasks backup to user self.process_tasks_job.status = constants.ExportJobStatus.BUILDING self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self.process_tasks_job.message = 'Retrieving tasks from server ...' self._log_progress("Building") self.process_tasks_job.put() # This list will contain zero or more tasklist dictionaries, which each contain tasks tasklists = [] total_num_tasklists = 0 total_num_tasks = 0 tasks_per_list = [] # --------------------------------------- # Retrieve all the tasklists for the user # --------------------------------------- logging.debug(fn_name + "Retrieve all the tasklists for the user") logservice.flush() next_tasklists_page_token = None more_tasklists_data_to_retrieve = True while more_tasklists_data_to_retrieve: if self.is_test_user: logging.debug(fn_name + "calling tasklists.list().execute() to create tasklists list") logservice.flush() retry_count = settings.NUM_API_TRIES while retry_count > 0: try: if next_tasklists_page_token: tasklists_data = self.tasklists_svc.list(pageToken=next_tasklists_page_token).execute() else: tasklists_data = self.tasklists_svc.list().execute() # Successfully retrieved data, so break out of retry loop break except Exception, e: retry_count = retry_count - 1 if retry_count > 0: if isinstance(e, AccessTokenRefreshError): # Log first 'n' AccessTokenRefreshError as Info, because they are reasonably common, # and the system usually continues normally after the 2nd call to # "new_request: Refreshing due to a 401" # Occassionally, the system seems to need a 3rd attempt # (i.e., success after waiting 45 seconds) logging.info(fn_name + "Access Token Refresh Error whilst retrieving list of tasklists (1st time, not yet an error). 
" + str(retry_count) + " attempts remaining: " + shared.get_exception_msg(e)) else: logging.warning(fn_name + "Error retrieving list of tasklists. " + str(retry_count) + " attempts remaining: " + shared.get_exception_msg(e)) logservice.flush() if retry_count <= 2: logging.debug(fn_name + "Giving server an extra chance; Sleeping for " + str(settings.WORKER_API_RETRY_SLEEP_DURATION) + " seconds before retrying") logservice.flush() # Update job_progress_timestamp so that job doesn't time out self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self.process_tasks_job.put() time.sleep(settings.WORKER_API_RETRY_SLEEP_DURATION) else: logging.exception(fn_name + "Still error retrieving list of tasklists after " + str(settings.NUM_API_TRIES) + " attempts. Giving up") logservice.flush() raise e if self.is_test_user and settings.DUMP_DATA: logging.debug(fn_name + "tasklists_data ==>") logging.debug(tasklists_data) logservice.flush() if tasklists_data.has_key(u'items'): tasklists_list = tasklists_data[u'items'] else: # If there are no tasklists, then there will be no 'items' element. This could happen if # the user has deleted all their tasklists. Not sure if this is even possible, but # checking anyway, since it is possible to have a tasklist without 'items' (see issue #9) logging.debug(fn_name + "User has no tasklists.") logservice.flush() tasklists_list = [] # tasklists_list is a list containing the details of the user's tasklists. 
# We are only interested in the title # if self.is_test_user and settings.DUMP_DATA: # logging.debug(fn_name + "tasklists_list ==>") # logging.debug(tasklists_list) # --------------------------------------- # Process all the tasklists for this user # --------------------------------------- for tasklist_data in tasklists_list: total_num_tasklists = total_num_tasklists + 1 if self.is_test_user and settings.DUMP_DATA: logging.debug(fn_name + "tasklist_data ==>") logging.debug(tasklist_data) logservice.flush() """ Example of a tasklist entry; u'id': u'MDAxNTkzNzU0MzA0NTY0ODMyNjI6MDow', u'kind': u'tasks#taskList', u'selfLink': u'https://www.googleapis.com/tasks/v1/users/@me/lists/MDAxNTkzNzU0MzA0NTY0ODMyNjI6MDow', u'title': u'Default List', u'updated': u'2012-01-28T07:30:18.000Z'}, """ tasklist_title = tasklist_data[u'title'] tasklist_id = tasklist_data[u'id'] if self.is_test_user and settings.DUMP_DATA: logging.debug(fn_name + "Process all the tasks in " + str(tasklist_title)) logservice.flush() # ===================================================== # Process all the tasks in this task list # ===================================================== tasklist_dict, num_tasks = self._get_tasks_in_tasklist(tasklist_title, tasklist_id, include_hidden, include_completed, include_deleted) # Track number of tasks per tasklist tasks_per_list.append(num_tasks) total_num_tasks = total_num_tasks + num_tasks self.process_tasks_job.total_progress = total_num_tasks self.process_tasks_job.tasklist_progress = 0 # Because total_progress now includes num_tasks for current tasklist self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self.process_tasks_job.message = '' self._log_progress("Processed tasklist") self.process_tasks_job.put() # if self.is_test_user: # logging.debug(fn_name + "Adding %d tasks to tasklist" % len(tasklist_dict[u'tasks'])) # Add the data for this tasklist (including all the tasks) into the collection of tasklists tasklists.append(tasklist_dict) # 
Check if there is another page of tasklists to be retrieved if tasklists_data.has_key('nextPageToken'): # There is another page of tasklists to be retrieved for this user, # which we'll retrieve next time around the while loop. # This happens if there is more than 1 page of tasklists. # It seems that each page contains 20 tasklists. more_tasklists_data_to_retrieve = True # Go around while loop again next_tasklists_page_token = tasklists_data['nextPageToken'] # if self.is_test_user: # logging.debug(fn_name + "There is (at least) one more page of tasklists to be retrieved") else: # This is the last (or only) page of results (list of tasklists) more_tasklists_data_to_retrieve = False next_tasklists_page_token = None # *** end while more_tasks_data_to_retrieve *** # ------------------------------------------------------ # Store the data, so we can return it to the user # ------------------------------------------------------ """ tasklists is a list of tasklist structures structure of tasklist { "title" : tasklist.title, # Name of this tasklist "tasks" : [ task ] # List of task items in this tasklist } structure of task { "title" : title, # Free text "status" : status, # "completed" | "needsAction" "id" : id, # Used when determining parent-child relationships "parent" : parent, # OPT: ID of the parent of this task (only if this is a sub-task) "notes" : notes, # OPT: Free text "due" : due, # OPT: Date due, e.g. 
2012-01-30T00:00:00.000Z NOTE time = 0 "updated" : updated, # Timestamp, e.g., 2012-01-26T07:47:18.000Z "completed" : completed # Timestamp, e.g., 2012-01-27T10:38:56.000Z } """ # Delete existing backup data records tasklist_data_records = model.TasklistsData.gql("WHERE ANCESTOR IS :1", db.Key.from_path(settings.DB_KEY_TASKS_BACKUP_DATA, self.user_email)) num_records = tasklist_data_records.count() logging.debug(fn_name + "Deleting " + str(num_records) + " old blobs") logservice.flush() for tasklists_data_record in tasklist_data_records: tasklists_data_record.delete() # logging.debug(fn_name + "Pickling tasks data ...") pickled_tasklists = pickle.dumps(tasklists) # logging.debug(fn_name + "Pickled data size = " + str(len(pickled_tasklists))) data_len = len(pickled_tasklists) # Multiply by 1.0 float value so that we can use ceiling to find number of Blobs required num_of_blobs = int(math.ceil(data_len * 1.0 / constants.MAX_BLOB_SIZE)) logging.debug(fn_name + "Calculated " + str(num_of_blobs) + " blobs required to store " + str(data_len) + " bytes") logservice.flush() try: for i in range(num_of_blobs): # Write backup data records tasklist_rec = model.TasklistsData(db.Key.from_path(settings.DB_KEY_TASKS_BACKUP_DATA, self.user_email)) slice_start = int(i*constants.MAX_BLOB_SIZE) slice_end = int((i+1)*constants.MAX_BLOB_SIZE) # logging.debug(fn_name + "Creating part " + str(i+1) + " of " + str(num_of_blobs) + # " using slice " + str(slice_start) + " to " + str(slice_end)) pkl_part = pickled_tasklists[slice_start : slice_end] tasklist_rec.pickled_tasks_data = pkl_part tasklist_rec.idx = i tasklist_rec.put() # logging.debug(fn_name + "Marking backup job complete") end_time = datetime.datetime.now() process_time = end_time - start_time proc_time_str = str(process_time.seconds) + "." 
+ str(process_time.microseconds)[:3] + " seconds" # Mark backup completed summary_msg = "Retrieved %d tasks from %d tasklists" % (total_num_tasks, total_num_tasklists) breakdown_msg = "Tasks per list: " + str(tasks_per_list) self.process_tasks_job.status = constants.ExportJobStatus.EXPORT_COMPLETED self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() # self.process_tasks_job.message = summary_msg + " in " + proc_time_str self.process_tasks_job.message = summary_msg + " at " + \ start_time.strftime("%H:%M UTC, %a %d %b %Y") logging.info(fn_name + "COMPLETED: " + summary_msg + " for " + self.user_email + " in " + proc_time_str) logservice.flush() self.process_tasks_job.put() try: end_time = datetime.datetime.now() process_time = end_time - start_time processing_time = process_time.days * 3600*24 + process_time.seconds + process_time.microseconds / 1000000.0 included_options_str = "Includes: Completed = %s, Deleted = %s, Hidden = %s" % (str(include_completed), str(include_deleted), str(include_hidden)) logging.debug(fn_name + "STATS: Job started at " + str(self.process_tasks_job.job_start_timestamp) + "\n Worker started at " + str(start_time) + "\n " + summary_msg + "\n " + breakdown_msg + "\n " + proc_time_str + "\n " + included_options_str) logservice.flush() usage_stats = model.UsageStats( user_hash = hash(self.user_email), number_of_tasks = self.process_tasks_job.total_progress, number_of_tasklists = total_num_tasklists, tasks_per_tasklist = tasks_per_list, include_completed = include_completed, include_deleted = include_deleted, include_hidden = include_hidden, start_time = start_time, processing_time = processing_time) usage_stats.put() logging.debug(fn_name + "Saved stats") logservice.flush() except Exception, e: logging.exception("Error saving stats") logservice.flush() # Don't bother doing anything else, because stats aren't critical except apiproxy_errors.RequestTooLargeError, e: logging.exception(fn_name + "Error putting results in DB - 
Request too large") logservice.flush() self.process_tasks_job.status = constants.ExportJobStatus.ERROR self.process_tasks_job.message = '' self.process_tasks_job.error_message = "Tasklists data is too large - Unable to store tasklists in DB: " + shared.get_exception_msg(e) self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self._log_progress("apiproxy_errors.RequestTooLargeError") self.process_tasks_job.put() except Exception, e: logging.exception(fn_name + "Error putting results in DB") logservice.flush() self.process_tasks_job.status = constants.ExportJobStatus.ERROR self.process_tasks_job.message = '' self.process_tasks_job.error_message = "Unable to store tasklists in DB: " + shared.get_exception_msg(e) self.process_tasks_job.job_progress_timestamp = datetime.datetime.now() self._log_progress("Exception") self.process_tasks_job.put()