def import_spreadsheet(request):
    """Fetch the first worksheet of a Google spreadsheet and echo its rows.

    Reads the spreadsheet key (or a full Google URL containing ``key=``)
    from the ``spreadsheet`` GET parameter, retrieves the list feed of the
    first worksheet, and returns the concatenated row entries as the
    HTTP response body.
    """
    import re
    import models  # kept for parity with the original; the Person-saving code was removed
    setup_token(client)
    # Figure out what spreadsheet to import; the default is a demo sheet key.
    spreadsheet = request.GET.get(
        'spreadsheet', '0Ar7e9bY7dwnBdEIzUk5kSk5CZ0kyYXVrempkWW80Snc')
    # If they entered a URL, extract the key from the "key=" query parameter.
    # NOTE(review): the character class looks like it was meant to be [^?&]
    # (stop at the next query delimiter) -- it also stops at '(' and '|'.
    if spreadsheet.find('google.com') > -1:
        spreadsheet_key = re.search(r'key=([^(?|&)]*)', spreadsheet).group(1)
    else:
        spreadsheet_key = spreadsheet
    # We're assuming the first worksheet for convenience; 'od6' is Google's
    # well-known id for a spreadsheet's first worksheet.
    worksheet_id = 'od6'
    # Retrieve the "values" projection of the list feed, the most DB-like feed.
    list_feed = ('https://spreadsheets.google.com/feeds/list/%s/%s/private/values'
                 % (spreadsheet_key, worksheet_id))
    feed = client.get_feed(list_feed,
                           desired_class=gdata.spreadsheets.data.ListsFeed)
    # Build the response with join() rather than repeated += -- the in-place
    # concatenation in the original was quadratic in the number of rows.
    sheet = u''.join(unicode(row) for row in feed.entry)
    return HttpResponse(sheet)
def copy_spreadsheet(request):
    """List the documents in the authorized Google Docs account.

    Responds with plain text, one "id = title" line per document entry.
    """
    setup_token(client)
    doc_feed = 'https://docs.google.com/feeds/default/private/full/'
    feed = client.get_feed(doc_feed, desired_class=gdata.docs.data.DocList)
    lines = ["%s = %s" % (entry.id.text, entry.title.text)
             for entry in feed.entry]
    return HttpResponse('\n'.join(lines), mimetype="text/plain")
def select_worksheet(request, spreadsheet_id, spreadsheet_title):
    """Let the user pick a worksheet from the chosen spreadsheet.

    When the spreadsheet contains exactly one worksheet, the selection
    page is skipped and the user is redirected straight to the import
    view with that worksheet.
    """
    setup_token(request.user.id)
    # Retrieve the worksheet options for this spreadsheet.
    worksheets_url = ('https://spreadsheets.google.com/feeds/worksheets/%s/private/full'
                      % spreadsheet_id)
    feed = client.get_feed(
        worksheets_url,
        desired_class=gdata.spreadsheets.data.SpreadsheetsFeed)
    # Map worksheet title -> worksheet id, parsed out of the entry's Atom id.
    sheets = {}
    for entry in feed.entry:
        worksheet_id = re.search(r'worksheets/[^/]*/([^/]*)',
                                 entry.id.text).group(1)
        sheets[entry.title.text] = worksheet_id
    if len(sheets) == 1:
        title, worksheet = sheets.items()[0]
        return HttpResponseRedirect(
            reverse(import_spreadsheet,
                    args=[spreadsheet_id, worksheet, spreadsheet_title, title]))
    context = {'sheets': sheets,
               'spreadsheet_id': spreadsheet_id,
               'spreadsheet_title': spreadsheet_title}
    return render_to_response('importing/select_worksheet.html', context,
                              context_instance=RequestContext(request))
def myGoogleDocs(request):
    """List the user's Google spreadsheets so one can be chosen for import.

    Clears any previously imported rows for this user's profile, checks
    that a valid OAuth access token is stored (redirecting to the token
    flow if not), then renders the spreadsheet picker.
    """
    settings = request.user.get_profile()
    Row.objects.filter(settings=settings).delete()
    # Bail out to the OAuth flow unless we hold a usable HMAC token.
    access_token = gdata.gauth.AeLoad('ACCESS_TOKEN' + str(request.user.id))
    if not isinstance(access_token, gdata.gauth.OAuthHmacToken):
        return HttpResponseRedirect(reverse(get_oauth_token))
    setup_token(request.user.id)
    # Retrieve the spreadsheet options, mapping title -> spreadsheet key.
    list_feed = 'https://spreadsheets.google.com/feeds/spreadsheets/private/full'
    feed = client.get_feed(
        list_feed,
        desired_class=gdata.spreadsheets.data.SpreadsheetsFeed)
    sheets = {}
    for entry in feed.entry:
        key = re.search(r'spreadsheets/([^/]*)', entry.id.text).group(1)
        sheets[entry.title.text] = key
    return render_to_response('importing/myGoogleDocs.html',
                              {'sheets': sheets},
                              context_instance=RequestContext(request))
# NOTE(review): this redefines myGoogleDocs with identical logic; at import
# time this later definition replaces the earlier one. Consider deleting one.
def myGoogleDocs(request):
    """Render the list of the user's Google spreadsheets for import.

    Deletes the user's previously imported rows, verifies the stored
    OAuth token (redirecting to the token flow when missing or of the
    wrong type), and shows the spreadsheet picker template.
    """
    settings = request.user.get_profile()
    Row.objects.filter(settings=settings).delete()
    access_token = gdata.gauth.AeLoad('ACCESS_TOKEN' + str(request.user.id))
    # Anything other than an HMAC token means we must (re)authorize first.
    if not isinstance(access_token, gdata.gauth.OAuthHmacToken):
        return HttpResponseRedirect(reverse(get_oauth_token))
    setup_token(request.user.id)
    # Retrieve the spreadsheet options.
    feed = client.get_feed(
        'https://spreadsheets.google.com/feeds/spreadsheets/private/full',
        desired_class=gdata.spreadsheets.data.SpreadsheetsFeed)
    sheets = {}
    for entry in feed.entry:
        # The spreadsheet key is the path segment after "spreadsheets/".
        sheets[entry.title.text] = re.search(
            r'spreadsheets/([^/]*)', entry.id.text).group(1)
    return render_to_response('importing/myGoogleDocs.html',
                              {'sheets': sheets},
                              context_instance=RequestContext(request))
# NOTE(review): this redefines select_worksheet with identical logic; the
# later definition replaces the earlier one at import time. Keep only one.
def select_worksheet(request, spreadsheet_id, spreadsheet_title):
    """Present the worksheet choices of a spreadsheet to the user.

    A spreadsheet with a single worksheet skips the selection template
    and redirects directly to the import view.
    """
    setup_token(request.user.id)
    # Retrieve the worksheet options.
    feed = client.get_feed(
        'https://spreadsheets.google.com/feeds/worksheets/%s/private/full'
        % spreadsheet_id,
        desired_class=gdata.spreadsheets.data.SpreadsheetsFeed)
    sheets = {}
    for entry in feed.entry:
        # The worksheet id is the final path segment of the entry's Atom id.
        sheets[entry.title.text] = re.search(
            r'worksheets/[^/]*/([^/]*)', entry.id.text).group(1)
    if len(sheets) == 1:
        title, worksheet = sheets.items()[0]
        return HttpResponseRedirect(
            reverse(import_spreadsheet,
                    args=[spreadsheet_id, worksheet, spreadsheet_title, title]))
    return render_to_response(
        'importing/select_worksheet.html',
        {'sheets': sheets,
         'spreadsheet_id': spreadsheet_id,
         'spreadsheet_title': spreadsheet_title},
        context_instance=RequestContext(request))
def import_spreadsheet(request, spreadsheet_key, worksheet_id,
                       spreadsheet_title, worksheet_title):
    """Import a worksheet's rows into Row objects for the current user.

    Fetches the "values" projection of the worksheet's list feed, replaces
    the user's previously imported rows, stores one Row per spreadsheet
    row (missing cells become ''), saves the column names in the session
    under 'colNames', and redirects to the field-selection view.
    """
    setup_token(request.user.id)
    # Retrieve the "values" projection of the list feed, the most DB-like feed.
    list_feed = ('https://spreadsheets.google.com/feeds/list/%s/%s/private/values'
                 % (spreadsheet_key, worksheet_id))
    feed = client.get_feed(list_feed,
                           desired_class=gdata.spreadsheets.data.ListsFeed)
    settings = request.user.get_profile()
    Row.objects.filter(settings=settings).delete()
    # Pivot the feed into {column name: {row index: cell value}}.
    # setdefault replaces the original __contains__ check + double lookup.
    col_dict = {}
    for i, row in enumerate(feed.entry):
        for col, value in row.to_dict().items():
            col_dict.setdefault(col, {})[i] = value
    # Lazy %-args so the formatting cost is paid only if the level is enabled.
    logging.info("import Dictionary: \n%s", col_dict)
    col_names = col_dict.keys()
    for i in range(len(feed.entry)):
        # Assemble the row in column order; absent or None cells become ''.
        collist = []
        for name in col_names:
            value = col_dict[name].get(i)
            collist.append(value if value is not None else '')
        Row.objects.create(settings=settings, columns=collist, order=i)
        logging.info("row object: \n%s", collist)
    request.session['colNames'] = col_names
    return HttpResponseRedirect(
        reverse(choose_fields, args=[spreadsheet_title, worksheet_title]))
# NOTE(review): this redefines import_spreadsheet with identical logic; the
# later definition replaces the earlier one at import time. Keep only one.
def import_spreadsheet(request, spreadsheet_key, worksheet_id,
                       spreadsheet_title, worksheet_title):
    """Import a worksheet's rows into Row objects for the current user.

    Fetches the "values" projection of the worksheet's list feed, replaces
    the user's previously imported rows, stores one Row per spreadsheet
    row (missing cells become ''), saves the column names in the session
    under 'colNames', and redirects to the field-selection view.
    """
    setup_token(request.user.id)
    # Retrieve the "values" projection of the list feed, the most DB-like feed.
    list_feed = ('https://spreadsheets.google.com/feeds/list/%s/%s/private/values'
                 % (spreadsheet_key, worksheet_id))
    feed = client.get_feed(list_feed,
                           desired_class=gdata.spreadsheets.data.ListsFeed)
    settings = request.user.get_profile()
    Row.objects.filter(settings=settings).delete()
    # Pivot the feed into {column name: {row index: cell value}};
    # setdefault replaces the original __contains__ check + double lookup.
    col_dict = {}
    for i, row in enumerate(feed.entry):
        for col, value in row.to_dict().items():
            col_dict.setdefault(col, {})[i] = value
    # Lazy %-args so formatting happens only when INFO logging is enabled.
    logging.info("import Dictionary: \n%s", col_dict)
    col_names = col_dict.keys()
    for i in range(len(feed.entry)):
        # Assemble the row in column order; absent or None cells become ''.
        collist = []
        for name in col_names:
            value = col_dict[name].get(i)
            collist.append(value if value is not None else '')
        Row.objects.create(settings=settings, columns=collist, order=i)
        logging.info("row object: \n%s", collist)
    request.session['colNames'] = col_names
    return HttpResponseRedirect(
        reverse(choose_fields, args=[spreadsheet_title, worksheet_title]))