def _create_spreadsheet(sheets_client: discovery.Resource) -> str:
    """Create a two-sheet test spreadsheet populated with fixture rows.

    :return: spreadsheetId
    """
    create_body = {
        "properties": {"title": "integration_test_spreadsheet"},
        "sheets": [
            {"properties": {"title": "sheet1"}},
            {"properties": {"title": "sheet2"}},
        ],
    }
    created = Spreadsheet.parse_obj(sheets_client.create(body=create_body).execute())
    sheet_id = created.spreadsheetId

    # Fixture data: one header row, 300 data rows, then edge-case rows
    # (lonely left/right values, an empty row, and a trailing orphan row).
    fixture_rows = [["header1", "irrelevant", "header3", "", "ignored"]]
    fixture_rows.extend([f"a{i}", "dontmindme", i] for i in range(300))
    fixture_rows += [
        ["lonely_left_value", "", ""],
        ["", "", "lonelyrightvalue"],
        ["", "", ""],
        ["orphan1", "orphan2", "orphan3"],
    ]

    # Write the same fixture into both sheets.
    for sheet_name in ("sheet1", "sheet2"):
        sheets_client.values().batchUpdate(
            spreadsheetId=sheet_id,
            body={
                "data": {"majorDimension": "ROWS", "values": fixture_rows, "range": sheet_name},
                "valueInputOption": "RAW",
            },
        ).execute()
    return sheet_id
def insert_event(
    service: Resource,
    course_name: str,
    calendar_ids: Dict,
    body: Dict,
    test: bool = False,
) -> None:
    """Insert an event (but only if the test flag is not enabled)."""
    print_event(body)
    if test:
        # Dry run: the event is printed above but never sent to the API.
        return
    service.events().insert(
        calendarId=calendar_ids[course_name],
        body=body,
    ).execute()
def get_first_row(client: discovery.Resource, spreadsheet_id: str, sheet_name: str) -> List[str]:
    """Return the formatted values of the first row of *sheet_name*."""
    response = client.get(
        spreadsheetId=spreadsheet_id,
        includeGridData=True,
        ranges=f"{sheet_name}!1:1",
    ).execute()
    spreadsheet = Spreadsheet.parse_obj(response)

    # Exactly one sheet should come back since the request named one range.
    sheets = spreadsheet.sheets
    if len(sheets) != 1:
        raise Exception(
            f"Unexpected return result: Sheet {sheet_name} was expected to contain data on exactly 1 sheet. "
        )
    ranges = sheets[0].data
    if len(ranges) != 1:
        raise Exception(
            f"Expected data for exactly one range for sheet {sheet_name}")
    rows = ranges[0].rowData
    if len(rows) != 1:
        raise Exception(
            f"Expected data for exactly one row for sheet {sheet_name}")
    return Helpers.get_formatted_row_values(rows[0])
def _get_list_result_with_fields(
        self, collection: Resource, filters: Mapping[str, str],
        field_selectors: Sequence[str]) -> List[str]:
    """Page through ``collection.list(...)`` and extract one nested field per item.

    The first selector is also used as the API ``part`` argument; the full
    selector chain is then applied to each returned item.
    """
    assert field_selectors
    part = field_selectors[0]
    extracted = []
    request = collection.list(part=part, **filters)
    while request is not None:
        response = self._execute_with_repeat(request)
        for item in response['items']:
            value = item
            # Walk down the nested dict, one selector at a time.
            for selector in field_selectors:
                value = value[selector]
            extracted.append(value)
        # list_next returns None once every page has been consumed.
        request = collection.list_next(request, response)
    return extracted
def add_video_to_matching_playlist(youtube: Resource, title: str, video_id: str) -> None:
    """Add *video_id* to the single playlist whose title pattern matches *title*.

    Raises PlaylistError when zero or multiple playlists match, or when the
    insert response looks invalid.
    """
    VALID_PLAYLISTS = {
        re.compile(r'^Game Engine '): 'PLDKWjXpoaOxtT0OyavgpNPiqH8PL-AjY4',
        re.compile(r'^Bonus Stream '): 'PLDKWjXpoaOxsF_5Lc_zsR0lJTh0viGSIK',
    }
    matches = [
        pid for pattern, pid in VALID_PLAYLISTS.items() if pattern.match(title)
    ]
    if len(matches) != 1:
        raise PlaylistError(
            f'Could not determine playlist for video "{title}" ({video_id})')

    body = {
        'snippet': {
            'playlistId': matches[0],
            'resourceId': {
                'kind': 'youtube#video',
                'videoId': video_id,
            },
        },
    }
    response = youtube.playlistItems().insert(
        part=','.join(body.keys()),
        body=body,
    ).execute(num_retries=3)
    if response is not None and 'id' in response:
        log.info(f'Playlist item {response["id"]} was successfully added.')
    else:
        raise PlaylistError(f'Invalid response: {response}')
def get_sheets(service: discovery.Resource, docid: str) -> List[Sheet]:
    "Get the sheet titles and ids of the given spreadsheet."
    metadata = service.spreadsheets().get(spreadsheetId=docid).execute()
    result = []
    for entry in metadata['sheets']:
        props = entry['properties']
        result.append(Sheet(docid, props['sheetId'], props['title']))
    return result
def get_sheets_in_spreadsheet(client: discovery.Resource, spreadsheet_id: str):
    """Return the title of every sheet in the spreadsheet (metadata only, no grid data)."""
    metadata = Spreadsheet.parse_obj(
        client.get(spreadsheetId=spreadsheet_id, includeGridData=False).execute())
    titles = []
    for sheet in metadata.sheets:
        titles.append(sheet.properties.title)
    return titles
def iter_sheet(service: discovery.Resource, sheet: Sheet) -> Generator[List[str], None, None]:
    "Iterate over the contents of a particular sheet."
    # Fetch the grid dimensions first so the full sheet range can be requested.
    num_rows, num_cols = get_sheet_size(service, sheet)
    response = service.spreadsheets().values().batchGet(
        spreadsheetId=sheet.docid,
        ranges=sheets_upload.sheet_range(num_rows, num_cols, sheet.name)).execute()
    yield from response['valueRanges'][0]['values']
def insert_calendar(
    service: Resource,
    body: Dict,
    course_name: str,
    calendar_ids: Dict,
    test: bool = False,
) -> None:
    """Insert a calendar (but only if the test flag is not enabled)."""
    print_calendar(body)
    if test:
        # Dry run: nothing is created and calendar_ids is left untouched.
        return
    created = service.calendars().insert(body=body).execute()
    # Record the new calendar id so later event inserts can find it by course.
    calendar_ids[course_name] = created['id']
def get_docids_from_index(files: discovery.Resource):
    """Given a files service, yield the document ids linked from the index page.

    Exports the index document as HTML and extracts the id from every anchor
    whose href looks like a Google Docs document URL
    (``.../document/d/<id>/...``).
    """
    html = files.export(fileId=INDEX_DOCID, mimeType='text/html').execute()
    doc = bs4.BeautifulSoup(html, 'lxml')
    # Compile once instead of re-matching the raw pattern for every anchor.
    docid_pattern = re.compile(r'document/d/(.*)/')
    for anchor in doc.find_all('a'):
        match = docid_pattern.search(anchor['href'])
        if match:
            yield match.group(1)
def upload_video_to_youtube(
    youtube: Resource,
    filename: str,
    title: str,
    description: str,
    category: str,
    privacy: str,
    publish_at: Optional[datetime.datetime] = None,
    tags: Optional[Collection[str]] = None,
) -> str:
    """Upload *filename* to YouTube via a resumable insert and return the video id."""
    status = {'privacyStatus': privacy}
    if publish_at is not None:
        status['publishAt'] = publish_at.isoformat()
    body = {
        'snippet': {
            'title': title,
            'description': description,
            'tags': tags,
            'categoryId': category,
        },
        'status': status,
    }
    # Call the API's videos.insert method to create and upload the video.
    #
    # chunksize controls how many bytes are sent per request; chunksize=-1
    # uploads the whole file in one HTTP request (a failed upload is still
    # retried from where it left off). That is usually best practice, but on
    # unreliable connections — or Python < 2.6 / App Engine — prefer a small
    # chunk such as 1024 * 1024 (1 megabyte) for better recovery.
    upload_request = youtube.videos().insert(
        part=','.join(body.keys()),
        body=body,
        media_body=MediaFileUpload(filename, chunksize=-1, resumable=True),
    )
    return _resumable_upload(upload_request)
def create_sheet(
        self, student: Student,
        spreadsheets_resource: discovery.Resource) -> GoogleSpreadsheet:
    """Add the raw data to the sheet."""
    # Build one query: schools for this student, related rows eagerly
    # fetched, milestones prefetched in date order, sorted by school name.
    milestones = Prefetch(
        "milestones", queryset=Milestone.objects.all().order_by("date"))
    target_schools = (
        TargetSchool.objects.filter(student=student)
        .select_related("school", "school_application")
        .prefetch_related(milestones)
        .order_by("school__name")
    )

    row_data = [self.build_header_row()]
    row_data.extend(self.build_school_rows(target_schools))

    request_body = {
        "properties": {
            "title": "{} - Application Status".format(student)
        },
        "sheets": [{
            "properties": {
                "gridProperties": {
                    "frozenRowCount": 1,
                    "frozenColumnCount": 1
                }
            },
            "data": [{
                "startRow": 0,
                "startColumn": 0,
                "rowData": row_data
            }],
        }],
    }
    response = spreadsheets_resource.create(body=request_body).execute()
    return GoogleSpreadsheet(
        response["spreadsheetId"],
        response["sheets"][0]["properties"]["sheetId"],
        target_schools.count(),
    )
def format_sheet(
    self,
    google_spreadsheet: GoogleSpreadsheet,
    spreadsheets_resource: discovery.Resource,
) -> None:
    """Format the newly created sheet.

    The create API isn't designed to work with ranges so this method
    sets the formatting of the newly created sheet.
    """
    sheet_id = google_spreadsheet.sheet_id
    requests = []

    # Set default font.
    requests.append({
        "repeatCell": {
            "range": {
                "sheetId": sheet_id
            },
            "cell": {
                "userEnteredFormat": {
                    "textFormat": {
                        "fontFamily": "Average",
                        "fontSize": 10
                    }
                }
            },
            "fields": "userEnteredFormat.textFormat",
        }
    })
    # Update header row font properties.
    requests.append({
        "repeatCell": {
            "range": {
                "sheetId": sheet_id,
                "endRowIndex": 1
            },
            "cell": {
                "userEnteredFormat": {
                    "textFormat": {
                        "fontSize": 12,
                        "bold": True,
                        "underline": True,
                    }
                }
            },
            "fields": "userEnteredFormat.textFormat(fontSize,bold,underline)",  # noqa
        }
    })
    # Autoresize columns that are too narrow.
    requests.append({
        "autoResizeDimensions": {
            "dimensions": {
                "sheetId": sheet_id,
                "dimension": "COLUMNS"
            }
        }
    })
    # Allow more space for the essay list prompts.
    requests.append({
        "updateDimensionProperties": {
            "range": {
                "sheetId": sheet_id,
                "dimension": "COLUMNS",
                "startIndex": 6,
                "endIndex": 7,
            },
            "properties": {
                "pixelSize": 314
            },
            "fields": "pixelSize",
        }
    })
    # Border columns that need borders.
    for column_index in (4, 5, 7, 9, 11, 13, 15, 17):
        requests.append(self.border_column_right(column_index, sheet_id))

    # Border rows between school groups (shift by 1 on the range to get the
    # math right). It seems like this should add 1 to account for the header
    # row, but rows are zero indexed so leaving off an increment does the
    # right thing.
    for group in range(1, google_spreadsheet.schools_count + 1):
        requests.append(self.border_row_bottom(group * SCHOOL_GROUP_SIZE, sheet_id))

    spreadsheets_resource.batchUpdate(
        spreadsheetId=google_spreadsheet.spreadsheet_id,
        body={"requests": requests}).execute()
def get_sheet_size(service: discovery.Resource, sheet: Sheet):
    "Get the size of a spreadsheet."
    metadata = service.spreadsheets().get(
        spreadsheetId=sheet.docid, ranges=sheet.name).execute()
    grid = metadata['sheets'][0]['properties']['gridProperties']
    return (grid['rowCount'], grid['columnCount'])