def model_cell(wks: Worksheet, index: str) -> Cell:
    """Prepare model_cell to apply to DataRange in main program."""
    # Work offline while styling so each property change is not its own API call.
    wks.unlink()
    template = Cell(index)
    template.text_format['fontSize'] = 11
    template.text_format['bold'] = True
    template.borders = {'top': {'style': 'SOLID'}}
    wks.link()
    return template
def remove_duplicates(self, stream: Worksheet, rows_list: list):
    """
    Removes duplicated rows, provided by `rows_list` as list of indexes.
    We are working with delete operation in offline mode,
    to decrease the number of API calls.
    1) Unlink the spreadsheet (make it for offline use)
    2) Perform delete operation and update the actual row index
    3) Link the spreadsheet (sync with online version) using batch_update method.
    """
    stream.unlink()
    # Plain loop instead of a side-effect list comprehension: the comprehension
    # built and discarded a list of None results for no benefit.
    for row in rows_list:
        stream.delete_rows(row, 1)
    stream.link()
def upload_top_movies(wk_top_movies: Worksheet):
    """Write the top-movies table into the 'Topplistor' worksheet starting at A3."""
    title = 'Topplistor'
    # Guard clause: refuse to write into a worksheet with the wrong title.
    if wk_top_movies.title != title:
        print(f'Wrong title on worksheet. Got {wk_top_movies.title} ,expected {title}')
        return
    top_movies = db.top_movies()
    # Each row: a hyperlinked IMDB title plus its third column value.
    rows: List[Row] = [
        [to_link(IMDB_LINK.format(movie[0]), movie[1]), movie[2]]
        for movie in top_movies
    ]
    row_end = len(top_movies) + 2
    col_end = ALPHABETH[len(top_movies[0]) - 1]
    wk_top_movies.update_values(f'A3:{col_end}{row_end}', rows)
def _write_bookings_by_type_and_digital_counts(departement_code: str, worksheet: Worksheet, current_row: int):
    """Write the bookings-per-type-and-medium table at column E of `current_row`."""
    # Choose the department-scoped query when a department is given,
    # otherwise fall back to the nationwide one.
    if departement_code:
        query = partial(
            query_get_booking_counts_grouped_by_type_and_medium_for_departement,
            departement_code=departement_code)
    else:
        query = query_get_booking_counts_grouped_by_type_and_medium
    bookings_by_type_and_digital_counts = get_offer_counts_grouped_by_type_and_medium(
        query, 'Nombre de réservations')
    worksheet.set_dataframe(bookings_by_type_and_digital_counts, f'E{current_row}')
def _write_offer_counts_grouped_by_type_and_medium(departement_code: str, worksheet: Worksheet, current_row: int):
    """Write the offers-per-type-and-medium table at column A of `current_row`."""
    # Choose the department-scoped query when a department is given,
    # otherwise fall back to the nationwide one.
    if departement_code:
        query = partial(
            query_get_offer_counts_grouped_by_type_and_medium_for_departement,
            departement_code=departement_code)
    else:
        query = query_get_offer_counts_grouped_by_type_and_medium
    offer_counts_grouped_by_type_and_medium = get_offer_counts_grouped_by_type_and_medium(
        query, 'Nombre d\'offres')
    worksheet.set_dataframe(offer_counts_grouped_by_type_and_medium, f'A{current_row}')
def should_add(worksheet: pyg.Worksheet, comment: praw.reddit.models.Comment):
    """ Whether or not a comment passes the criteria for adding to the Google Sheet and replying
    :param worksheet: worksheet containing grey points
    :param comment: comment by Grey
    :return: Whether comment passes
    """
    tracked_ids = worksheet.get_as_df()['Comment ID'].to_list()
    # Reject when (checked in this order, short-circuiting):
    #   1. already in the sheet
    #   2. Grey deleted his comment or it was removed
    #   3. the parent comment is deleted
    #   4. the body lacks the point trigger
    return not (
        comment.id in tracked_ids
        or comment.removal_reason is not None
        or comment.parent().author is None
        or not contains_point_trigger(comment.body)
    )
def upload_movies(wk_movies: Worksheet) -> None:
    """Write the ratings table and the 20 latest ratings into the 'Filmer' worksheet."""
    title = 'Filmer'
    # Guard clause: refuse to write into a worksheet with the wrong title.
    if wk_movies.title != title:
        print(f'Wrong title on worksheet. Got {wk_movies.title} ,expected {title}')
        return
    movies: List[Movie] = db.movies()
    rows: List[Row] = [get_row_rating(movie) for movie in movies]
    row_end = len(movies) + 1
    col_end = ALPHABETH[len(USERS) + 4]
    wk_movies.update_values(f'A2:{col_end}{row_end}', rows)
    # Latest ratings block lives in a fixed range; e is the IMDB id used for the link.
    latest_ratings = [
        [a, humanize.naturalday(b), to_link(IMDB_LINK.format(e), c), d]
        for (a, b, c, d, e) in db.latest_ratings(20)
    ]
    wk_movies.update_values('M12:P31', latest_ratings)
def update_sheet_from_mfp(username: str, mfp_client: myfitnesspal.Client, wks: pygsheets.Worksheet):
    """Append one row per day of MyFitnessPal totals since the last logged date.

    The last cell of column 1 holds the most recent date already in the sheet;
    header row 1 maps nutrient names to column indexes.
    """
    last_cell = _get_col(wks, 1)[-1][0]
    next_row = last_cell.row
    last_logged = datetime.strptime(last_cell.value, DATE_FORMAT)
    column_of = _row_to_col_index_dict(_get_row(wks, 1)[0])
    for day in _date_range(last_logged + timedelta(days=1)):
        next_row += 1
        # Pre-size the row; unknown nutrients stay None (blank cells).
        values: List[Any] = [None] * len(column_of)
        values[0] = day.strftime(DATE_FORMAT)
        totals = mfp_client.get_date(day, username=username).totals
        for nutrient, amount in totals.items():
            if nutrient in column_of:
                values[column_of[nutrient]] = amount
        wks.update_values((next_row, 1), [values])
def add_comment_to_sheet(comment: praw.reddit.models.Comment, worksheet: pyg.Worksheet):
    """Append one point entry for `comment` to the sheet and rewrite it from A1.

    The new Point ID is one past the current maximum in the sheet.
    """
    df = worksheet.get_as_df()
    created = datetime.utcfromtimestamp(comment.created_utc)
    permalink = rf'https://old.reddit.com/comments/{comment.link_id[3:]}/_/{comment.id}/?context=3'
    new_row = pd.DataFrame(
        {
            'Point ID': df['Point ID'].max() + 1,
            'Username': rf'/u/{comment.parent().author}',
            'Comment Link': permalink,
            'Subreddit': rf"/r/{comment.subreddit}",
            'Date': created.strftime('%Y-%m-%d %H:%M:%S'),
            'Comment ID': comment.id,
        },
        index=[1],
    )
    worksheet.set_dataframe(pd.concat([df, new_row]), start='A1')
def _get_row(sheet: pygsheets.Worksheet, index: int) -> List[List[Cell]]:
    """Get row number `index` from the worksheet `sheet`.

    Returns:
        list[list[pygsheets.Cell]]: A one-row matrix — `get_values` returns
        the range as a list of rows, so callers take element ``[0]`` to get
        the row's cells. Empty cells at the end of the row are excluded
        (``include_tailing_empty=False``).
    """
    return sheet.get_values(
        start=(index, 1),
        end=(index, sheet.cols),
        returnas="cell",
        include_tailing_empty=False,
    )
def find_duplicates(self, stream: Worksheet, primary_key: str):
    """ Finds the duplicated records inside of target worksheet.
    Returns: List of indexes of rows to remove from target worksheet. [1, 4, 5, ..., 99] """
    seen = set()
    rows_to_delete = []
    pk_col_index = self.index_cols(stream)[primary_key]
    # Drop the first value: it is the header, not data.
    pk_col_values = stream.get_col(pk_col_index, include_tailing_empty=False)[1:]
    # Sheet rows are 1-based and row 1 is the header, so data starts at row 2.
    for row_index, value in enumerate(pk_col_values, 2):
        if value in seen:
            rows_to_delete.append(row_index)
        else:
            seen.add(value)
    # Highest index first, so deletions don't shift the remaining targets.
    return rows_to_delete[::-1]
def __LoadWorkSheet(
        self, worksheet: pygsheets.Worksheet
) -> Tuple[PaymentsData, PaymentsDataErrors]:
    # Parse the payment worksheet into (payments, row errors): reads every
    # data row, pulls the email/user/expiration cells at the configured
    # column indexes, and accumulates valid entries via __AddPayment.
    payments_data = PaymentsData(self.config)
    payments_data_err = PaymentsDataErrors()
    # Get column indexes
    email_col_idx = self._ColumnToIndex(
        self.config.GetValue(BotConfigTypes.PAYMENT_EMAIL_COL))
    user_col_idx = self._ColumnToIndex(
        self.config.GetValue(BotConfigTypes.PAYMENT_USER_COL))
    expiration_col_idx = self._ColumnToIndex(
        self.config.GetValue(BotConfigTypes.PAYMENT_EXPIRATION_COL))
    # Read each row
    rows = worksheet.get_all_values(returnas="matrix")
    for i, row in enumerate(rows):
        # Skip header (first row)
        if i > 0:
            try:
                # Get cell values; IndexError means the row is shorter than
                # the configured column indexes (missing fields).
                email = row[email_col_idx].strip()
                user = User.FromString(self.config, row[user_col_idx].strip())
                expiration = row[expiration_col_idx].strip()
            except IndexError:
                # Sheet row numbers are 1-based, hence i + 1 in the message.
                self.logger.GetLogger().warning(
                    f"Row index {i + 1} is not valid (some fields are missing), skipping it..."
                )
            else:
                # Runs only when no exception was raised above.
                # Skip invalid users
                if user.IsValid():
                    self.__AddPayment(i + 1, payments_data, payments_data_err,
                                      email, user, expiration)
    return payments_data, payments_data_err
def check_values(self, wks: Worksheet) -> bool:
    """Return True when cell A2 holds the expected second test value.

    The comparison already yields a bool, so the former
    `True if ... else False` ternary was redundant.
    """
    return wks.get_value("A2") == self.test_data[1]
def populate_test_wks(self, wks: Worksheet) -> Worksheet:
    """Append the test data to `wks` column-wise and return the worksheet."""
    payload = self.test_data
    wks.append_table(payload, dimension="COLUMNS")
    return wks
def _write_usage_table(departement_code: str, worksheet: Worksheet, current_row: int) -> int:
    """Write the usage metrics (label in column A, value in column B).

    Returns the row index of the last line written.
    """
    metrics = (
        ('# Comptes activés', count_activated_users),
        ('# Comptes ayant réservé', count_users_having_booked),
        ('# Moyen de réservations', get_mean_number_of_bookings_per_user_having_booked),
        ('€ Moyen de dépenses', get_mean_amount_spent_by_user),
    )
    for position, (label, compute) in enumerate(metrics):
        # Leave a gap before every line except the first.
        if position:
            current_row += DashboardConfig.space_between_lines
        worksheet.update_value(f'A{current_row}', label)
        worksheet.update_value(f'B{current_row}', compute(departement_code))
    return current_row
def _write_diversification_table(departement_code: str, worksheet: Worksheet, current_row: int) -> int:
    # Writes the diversification dashboard: three label/value column pairs
    # (A/B offerers, E/F offers, I/J bookings), one metric row per
    # space_between_lines step. Returns the row index of the last line written.
    # Header row for the three column pairs.
    worksheet.update_value(f'A{current_row}', 'Indicateur')
    worksheet.update_value(f'B{current_row}', 'Valeur')
    worksheet.update_value(f'E{current_row}', 'Indicateur')
    worksheet.update_value(f'F{current_row}', 'Valeur')
    worksheet.update_value(f'I{current_row}', 'Indicateur')
    worksheet.update_value(f'J{current_row}', 'Valeur')
    current_row += DashboardConfig.space_between_lines
    # Row 1: totals since the start of the pass.
    worksheet.update_value(f'A{current_row}', '# Offreurs depuis le début du pass')
    worksheet.update_value(f'B{current_row}', get_offerer_count(departement_code))
    worksheet.update_value(f'E{current_row}', '# Offres depuis le début du pass')
    worksheet.update_value(
        f'F{current_row}', get_offers_with_user_offerer_and_stock_count(departement_code))
    worksheet.update_value(f'I{current_row}', '# Réservations')
    worksheet.update_value(f'J{current_row}', get_all_bookings_count(departement_code))
    current_row += DashboardConfig.space_between_lines
    # Row 2: offerers with stock / available offers / validated bookings.
    worksheet.update_value(
        f'A{current_row}', '# Offreurs ayant mis une offre depuis le début du pass')
    worksheet.update_value(f'B{current_row}', get_offerer_with_stock_count(departement_code))
    worksheet.update_value(f'E{current_row}', '# Offres disponibles')
    worksheet.update_value(
        f'F{current_row}', get_offers_available_on_discovery_count(departement_code))
    worksheet.update_value(f'I{current_row}', '# Réservations validées')
    worksheet.update_value(f'J{current_row}', get_all_used_or_finished_bookings(departement_code))
    current_row += DashboardConfig.space_between_lines
    # Row 3: offerers with discoverable offers / booked offers / cancelled bookings.
    worksheet.update_value(f'A{current_row}', '# Offreurs ayant une offre disponible')
    worksheet.update_value(
        f'B{current_row}',
        get_offerers_with_offer_available_on_discovery_count(departement_code))
    worksheet.update_value(f'E{current_row}', '# Offres réservées')
    worksheet.update_value(
        f'F{current_row}', get_offers_with_non_cancelled_bookings_count(departement_code))
    worksheet.update_value(f'I{current_row}', '# Réservations annulées')
    worksheet.update_value(f'J{current_row}', count_all_cancelled_bookings(departement_code))
    current_row += DashboardConfig.space_between_lines
    # Row 4: only the offerer pair is written on this last line.
    worksheet.update_value(f'A{current_row}', '# Offreurs réservés')
    worksheet.update_value(
        f'B{current_row}', get_offerers_with_non_cancelled_bookings_count(departement_code))
    return current_row
def _write_finance_table(departement_code: str, worksheet: Worksheet, current_row: int) -> int:
    """Write the finance metrics (label in column A, value in column B).

    Returns the row index of the last line written.
    """
    worksheet.update_value(f'A{current_row}', 'Indicateur')
    worksheet.update_value(f'B{current_row}', 'Valeur')
    metrics = (
        ('€ Crédit total activé', get_total_deposits),
        ('€ Crédit total dépensé', get_total_amount_spent),
        ('€ Dépenses totales à rembourser', get_total_amount_to_pay),
    )
    for label, compute in metrics:
        current_row += DashboardConfig.space_between_lines
        worksheet.update_value(f'A{current_row}', label)
        worksheet.update_value(f'B{current_row}', compute(departement_code))
    return current_row