def getAuthenticatedClient(user): try: with open("token", "r") as f: auth_token = f.readline().strip() try: client = gdata.docs.client.DocsClient( auth_token=gdata.gauth.ClientLoginToken(auth_token), source='jasharpe-docssync-v1') client.ssl = True client.http_client.debug = False # check that we're authorized client.GetDocList(limit=0) except gdata.client.Unauthorized as e: print "Bad authentication information from saved token." raise e except (IOError, gdata.client.Unauthorized): client = gdata.docs.client.DocsClient(source='jsharpe-docsync-v1') client.ssl = True client.http_client.debug = False while 1: # get password password = getpass.getpass("password for %s: " % user) try: client.ClientLogin(user, password, client.source) break except gdata.client.BadAuthentication as e: print "Bad authentication information." f = open("token", "w") f.write(client.auth_token.token_string) f.close() return client
def export(username, password, docs_name, output_format, dest_path='' ): """ To download we need: - filename - google doc ressource_id - destination path """ logger = logging.getLogger('edgdd') try: client = gdata.docs.client.DocsClient(source='edggd-v0.5') client.ssl = True client.http_client.debug = False client.ClientLogin(username, password, source=client.source) gs_client = gdata.spreadsheet.service.SpreadsheetsService(source='edggd-v0.5') gs_client.ClientLogin(username, password, source=gs_client.source) except gdata.service.BadAuthentication: print "Error : Authentification refused by Google. Check supplied username and password." sys.exit(-2) for doc_name in docs_name: feed = client.GetDocList(uri=("/feeds/default/private/full?title=%s&title-exact=true&max-results=5" % (doc_name,))) if not feed.entry : print "Error : Document '%s' not found !" % (doc_name, ) sys.exit(-3) # now we check query returns only one file if len(feed.entry) > 1 : print "Warning : Query for '%s' returned %i documents!!!" % (doc_name, len(feed.entry) ) entry_counter = 0 for entry in feed.entry : resource_id = entry.resource_id.text doc_type = resource_id[:resource_id.find(':')] if dest_path: dest_path += '/' if dest_path[-1] <> '/' else '' if entry_counter : file_path = dest_path+doc_name+'.'+output_format+'.'+str(entry_counter) else : file_path = dest_path+doc_name+'.'+output_format # When downloading a spreadsheet, the authenticated request needs to be # sent with the spreadsheet service's auth token. if doc_type == 'spreadsheet': docs_token = client.auth_token # we save token client.auth_token = gdata.gauth.ClientLoginToken(gs_client.GetClientLoginToken()) client.Export(resource_id, file_path, gid=0) client.auth_token = docs_token # restore the DocList auth token else: # download a doc not a spreadsheet client.Export(entry, file_path) print "Info : %s exported" % (file_path,) entry_counter+=1
def __init__(self, parent):
    """Build the tree widget and populate it from a Google Docs feed.

    Initializes the QTreeWidget base, logs in to Google Docs, stores the
    document list feed on ``self.feed``, and calls ``self.fill()`` to
    populate the tree.
    """
    QTreeWidget.__init__(self, parent)
    # Imported lazily so the gdata dependency is only needed when this
    # widget is actually constructed.
    import gdata
    import gdata.docs.client
    client = gdata.docs.client.DocsClient()
    # NOTE(review): SECURITY — account credentials are hard-coded in
    # source. These should be moved to configuration/secret storage and
    # the leaked password rotated.
    client.ClientLogin('*****@*****.**', 'oWertryN8..//vim', API().config.info.title)
    # self.feed: the full DocsList feed; consumed by self.fill().
    self.feed = client.GetDocList()
    self.fill()
def get_folder_list(request):
    """Test callback view: list the user's Google Docs folders.

    Renders the folder feed when an OAuth access token is in the session;
    upgrades a pending request token and redirects back here when only
    that is present; otherwise starts the OAuth dance.
    """
    session = request.session
    if session.get(GOOGLE_OAUTH_TOKEN, False):
        # Fully authorized: fetch the folder feed and render it.
        access = session[GOOGLE_OAUTH_TOKEN]
        client = get_client(access.token, access.token_secret)
        context = {
            'feed': client.GetDocList(uri='/feeds/default/private/full/-/folder'),
        }
        return render_to_response("google_reftagger_folder_list.html",
                                  context,
                                  context_instance=RequestContext(request))
    if session.get(GOOGLE_OAUTH_REQ_TOKEN, False):
        # Request token present: upgrade it, then retry this view.
        oauth_get_access_token(request)
        return HttpResponseRedirect("http://" + request.get_host() + request.path)
    return oauth_start(request)
def docdownload(request, facility_id):
    """Download the most recently updated Google Docs R&R document for a
    facility, exported as PDF.

    Requires an AuthSub token in the session; when no matching document
    exists, redirects back to the facility detail page with an error
    message. Returns None (no response) if the session has no token,
    preserving the original behavior.
    """
    if 'token' in request.session:
        # should be able to make this global
        client = gdata.docs.client.DocsClient()
        client.ssl = True  # Force all API requests through HTTPS
        client.http_client.debug = False  # Set to True for debugging HTTP requests
        client.auth_token = gdata.gauth.AuthSubToken(request.session['token'])
        supply_point = get_object_or_404(SupplyPoint, pk=facility_id)
        query_string = '/feeds/default/private/full?title=%s&title-exact=false&max-results=100' % supply_point.code
        feed = client.GetDocList(uri=query_string)
        if not feed.entry:
            messages.error(request, 'Sorry, there is no recent R&R for this facility.')
            return HttpResponseRedirect(
                reverse("tz_facility_details", args=[supply_point.pk]))
        # Pick the entry with the latest 'updated' timestamp (first one
        # wins on ties, matching the original strict '>' comparison).
        most_recent_doc = max(
            feed.entry,
            key=lambda entry: string_to_datetime(entry.updated.text))
        exportFormat = '&exportFormat=pdf'
        content = client.GetFileContent(
            uri=most_recent_doc.content.src + exportFormat)
        response = HttpResponse(content)
        # BUG FIX: header name was 'content-Type'; use the conventional
        # capitalization so the emitted header matches Content-Disposition.
        response['Content-Type'] = 'application/pdf'
        response['Content-Disposition'] = (
            'inline; filename=%s' % most_recent_doc.title.text)
        return response
def docify(prepend):
    """creates and uploads a shared google doc from the template in filename

    This method is only called when adding a new trip. This first alters
    the template document to have the trip info stored on the first line
    of the file. It then proceeds to set up a client for the google docs
    interactions using my secret credentials. The edited template is
    uploaded with a generic name and the link to the document is
    shortened to be returned. The document is then found in the DocsList
    and the ACL permissions are altered so anyone can edit the file.
    Before returning the shortened link, the template is restored by
    removing the trip description from the first line.
    """
    # BUG FIX: removed the dead `link = ''` assignment that was
    # immediately overwritten below.
    add_first(prepend)
    client = gdata.docs.client.DocsClient(source='trailbot')
    client.ClientLogin(sekret_username, sekret_password, client.source)
    entry = client.Upload(filename, 'trip', content_type='text/plain')
    link = tinyurl.create_one(entry.GetAlternateLink().href)
    # The most recently uploaded doc is the first entry in the feed.
    feed = client.GetDocList(
        uri='https://docs.google.com/feeds/default/private/full')
    doc_entry = feed.entry[0]
    # Grant world ('default' scope) write access so anyone can edit.
    scope = gdata.acl.data.AclScope(type='default')
    role = gdata.acl.data.AclRole(value='writer')
    acl_entry = gdata.docs.data.Acl(scope=scope, role=role)
    client.Post(acl_entry, doc_entry.GetAclFeedLink().href)
    # Restore the template before returning.
    remove_first()
    return link
def dedocify(to_remove):
    """trashes a matching google doc from my account

    Called when removing a trip identified by its keywords. After logging
    in to the google account, the DocsList feed is searched for files
    containing the trip keywords and the first match is trashed. A
    matching trip has already been found before this is called, so as
    long as the doc was generated when the trip was added, a match should
    exist.
    """
    client = gdata.docs.client.DocsClient(source='trailbot')
    client.ClientLogin(sekret_username, sekret_password, client.source)
    # Build a full-text search query from the trip keywords.
    query = to_remove.replace(' ', '+')
    matches = client.GetDocList(
        uri='https://docs.google.com/feeds/default/private/full?q=' + query)
    if not matches.entry:
        return
    client.Delete(matches.entry[0])
def get_folder_contents(request, resource_id):
    """Render the documents inside the Google Docs folder *resource_id*.

    Mirrors get_folder_list's auth flow: render when fully authorized,
    upgrade a pending request token and redirect back here, or start the
    OAuth dance from scratch.
    """
    session = request.session
    if session.get(GOOGLE_OAUTH_TOKEN, False):
        access = session[GOOGLE_OAUTH_TOKEN]
        client = get_client(access.token, access.token_secret)
        folder = client.GetDoc(resource_id)
        # Restrict the folder's content feed to documents only.
        context = {
            'folder': folder,
            'feed': client.GetDocList(uri=folder.content.src + "/-/document"),
        }
        return render_to_response("google_reftagger_folder.html",
                                  context,
                                  context_instance=RequestContext(request))
    if session.get(GOOGLE_OAUTH_REQ_TOKEN, False):
        oauth_get_access_token(request)
        return HttpResponseRedirect("http://" + request.get_host() + request.path)
    return oauth_start(request)
def PrintFeed(feed):
    """Print title, document type, and resource id for each feed entry."""
    print '\n'
    if not feed.entry:
        print 'No entries in feed.\n'
    for entry in feed.entry:
        print entry.title.text.encode(
            'UTF-8'), entry.GetDocumentType(), entry.resource_id.text
        # List folders the document is in.
        for folder in entry.InFolders():
            print folder.title


# NOTE(review): the script-level code below uses a `client` that is not
# defined in this chunk — presumably an authenticated DocsClient created
# elsewhere in the file; confirm before running standalone.
feed = client.GetDocList(
    uri='/feeds/[email protected]/private/full?showfolders=true')
PrintFeed(feed)

#new_folder= client.Create(gdata.docs.data.FOLDER_LABEL, 'Research')

import gdata.data
ms = gdata.data.MediaSource(
    file_path='/home/alfaceor/Research/Eskeleton_ProteinFolding.txt',
    content_type='text/plain')

#feed = client.GetDocList(uri='/feeds/default/private/full?title=Research&title-exact=true&max-results=5')
# List only folders, then upload the local file into the hard-coded
# 'Research' folder via its contents URI.
feed = client.GetDocList(uri='/feeds/default/private/full/-/folder')
PrintFeed(feed)

research_folder = 'https://docs.google.com/feeds/default/private/full/folder%3A0B_kALGdOkvLYOGY4MDA0NTItMzFiNS00YTA2LThiYzYtOTA2NjJlMTM5Njc1/contents'
entry = client.Upload(ms, 'Eskeleton_ProteinFolding', folder_or_uri=research_folder)
print 'Exported %s as ' % entry.title.text continue elif entry.GetDocumentType() == 'document': if not file_path.endswith('.doc'): file_path += '.doc' print 'Exporting document to %s...' % file_path client.Export(entry, file_path) print 'Exported %s as a .doc file' % entry.title.text else: if entry.GetDocumentType( ) == 'image/png' and not file_path.endswith('.png'): file_path += '.png' elif entry.GetDocumentType( ) == 'image/jpg' and not file_path.endswith('.jpg'): file_path += '.jpg' elif entry.GetDocumentType( ) == 'image/jpeg' and not file_path.endswith('.jpg'): file_path += '.jpg' print 'Downloading file to %s...' % file_path client.Download(entry, file_path) print 'Downloaded %s' % entry.title.text continue # List folders the document is in. for folder in entry.InFolders(): print folder.title feed = client.GetDocList() PrintFeed(feed)