Example #1
    def sync_table(target_map):

        update_cells = []
        target_text_map = build_target_map()
        colnumber = 33 #if direct_column else 34
        valid_target_dict = utils.get_valid_target_map(worksheets["BIA to SDG mapping"], colnumber)
        write_row_num = title_count
        for row in worksheets["BIA to SDG Target Mapping"]:

            writetarget = row[0]

            targets = []
            if writetarget in valid_target_dict:
                targets = valid_target_dict[writetarget]
            padding = [''] * (20 - len(targets))
            targets.extend(padding)

            write_row_num += 1

            for n, target in enumerate(targets):
                value = '' if target == '' else target_text_map[target]
                update_cells.append(gspread.Cell(write_row_num, 12 + n, value))

        # Update all cells
        writesheet.update_cells(update_cells)
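The pattern shared by all of these examples (collect gspread.Cell objects in a list and push them with a single update_cells call) keeps the whole write down to one API request. A minimal, self-contained sketch of that idea; the credentials path and spreadsheet title are placeholders, not taken from the snippet above:

import gspread
from oauth2client.service_account import ServiceAccountCredentials

scope = ['https://spreadsheets.google.com/feeds',
         'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)  # placeholder path
client = gspread.authorize(creds)
worksheet = client.open("Example Sheet").sheet1  # placeholder spreadsheet title

rows = [["Name", "Score"], ["Alice", 10], ["Bob", 7]]
cells = [gspread.Cell(r + 1, c + 1, value)
         for r, row in enumerate(rows)
         for c, value in enumerate(row)]
worksheet.update_cells(cells)  # one batched request instead of one call per cell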
Example #2
    def overwrite_sheet(self, spreadsheet_id, table, sheet_index=0):
        """
        Replace the data in a Google sheet with a Parsons table, using the table's columns as the
        first row.

        `Args:`
            spreadsheet_id: str
                The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
            table: obj
                Parsons table
            sheet_index: int (optional)
                The index of the desired worksheet
        """

        sheet = self._get_sheet(spreadsheet_id, sheet_index)
        sheet.clear()

        # Add header row
        sheet.append_row(table.columns)

        cells = []
        for row_num, row in enumerate(table.data):
            for col_num, cell in enumerate(row):
                # We start at row #2 to keep room for the header row we added above
                cells.append(
                    gspread.Cell(row_num + 2, col_num + 1, row[col_num]))

        # Update the data in one batch
        sheet.update_cells(cells)
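A hedged usage sketch of the method above, assuming it lives on Parsons' GoogleSheets connector with credentials already configured in the environment; the spreadsheet ID is a placeholder:

from parsons import Table, GoogleSheets

google_sheets = GoogleSheets()
tbl = Table([{'name': 'Alice', 'score': 10},
             {'name': 'Bob', 'score': 7}])
google_sheets.overwrite_sheet('your-spreadsheet-id', tbl, sheet_index=0)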
Example #3
    async def cmd_xproll(self, *args, member, channel, **_):
        target = member
        try:
            wsh = gc.open_by_key(CHAR_SHEET[str(target.id)]).sheet1
        except Exception:
            raise BotError("Impossible d'ouvrir la fiche de personnage du membre")
        ll = wsh.get_all_values()  # type: List[List[str]]
        if len(ll[0]) < 13:
            raise BotError("La fiche de personnage doit au moins avoir une colonne 'M'")

        recap = []
        to_update = {}
        for i, line in enumerate(ll):
            if line[12] == 'TRUE':  # if column M == TRUE
                last_rec = xp_roll(line)
                recap.append(last_rec)
                if last_rec['success']:
                    to_update[i + 1] = last_rec['new_xp']
        if not recap:
            return await channel.send("Vous n'avez aucun jet d'XP à faire ...")
        await channel.send("```diff\n{}```".format('\n'.join(
           [( f"{'+' if d['success'] else '-'} {d['comp_name'][:16]:<16} {d['roll']:^3}/{d['old_total']:>3}"
            + ' | ' + (f"{d['old_total']:^3}->{d['new_total']:^3} (+{d['xp_won']}){' CRITIQUE' if d['crits'] else ''}" if d['success'] else 'Échoué'))
            for d in recap]
        )))
        up = wsh.range(f"I1:I{len(ll)}")
        up = [gspread.Cell(cell.row, cell.col, to_update[cell.row]) for cell in up if cell.row in to_update]
        if up:
            wsh.update_cells(up)
        up = wsh.range(f"M1:M{len(ll)}")
        for cell in up:
            if cell.value == 'FALSE' or cell.value == 'TRUE':
                cell.value = False
        if up:
            wsh.update_cells(up)
Example #4
    def append_to_sheet(self, spreadsheet_id, table, sheet_index=0):
        """
        Append data from a Parsons table to a Google sheet. Note that the table's columns are
        ignored, as we'll be keeping whatever header row already exists in the Google sheet.

        `Args:`
            spreadsheet_id: str
                The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
            table: obj
                Parsons table
            sheet_index: int (optional)
                The index of the desired worksheet
        """

        sheet = self._get_sheet(spreadsheet_id, sheet_index)

        # Grab the existing data, so we can figure out where to start adding new data as a batch.
        # TODO Figure out a way to do a batch append without having to read the whole sheet first.
        # Maybe use gspread's low-level batch_update().
        existing_table = self.read_sheet(spreadsheet_id, sheet_index)

        cells = []
        for row_num, row in enumerate(table.data):
            for col_num, cell in enumerate(row):
                # Add 2 to allow for the header row, and for google sheets indexing starting at 1
                sheet_row_num = existing_table.num_rows + row_num + 2
                cells.append(
                    gspread.Cell(sheet_row_num, col_num + 1, row[col_num]))

        # Update the data in one batch
        sheet.update_cells(cells)
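As a worked example of the row arithmetic: if the sheet already holds a header row plus five data rows, read_sheet returns a table with num_rows equal to 5, so the first appended cell lands in sheet row 5 + 0 + 2 = 7, directly below the last populated row.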
Example #5
def unmap(livesheet, worksheets, title_report_count):
    """Transpose mapped and unmapped indicators column in the report sheet"""

    def getmax_count(wks, col):
        maxcount = 0
        for x in wks:
            l = [a.strip() for a in x[col].splitlines()]
            maxcount = max(maxcount, len(l))
        return maxcount

    sheetname = "Unmapped Indicators"
    map_col = 14
    unmap_col = 15
    max_mapped_count = getmax_count(worksheets[sheetname], map_col)
    max_unmapped_count = getmax_count(worksheets[sheetname], unmap_col)
    title_count = title_report_count[sheetname]
    start_map_col = map_col + 8
    start_unmap_col = start_map_col + max_mapped_count
    writesheet = livesheet.worksheet(sheetname)

    update_cells = []
    write_row_num = title_count

    # Write Titles
    for n in range(max_mapped_count):
        update_cells.append(gspread.Cell(write_row_num, start_map_col + n, "Mapped_{}".format(n+1)))
    for n in range(max_unmapped_count):
        update_cells.append(gspread.Cell(write_row_num, start_unmap_col + n, "Unmapped_{}".format(n+1)))

    # Write Values
    for n, row in enumerate(worksheets[sheetname]):
        map_vals = [a.strip() for a in row[map_col].splitlines()]
        padding = [''] * (max_mapped_count - len(map_vals))
        map_vals.extend(padding)

        unmap_vals = [a.strip() for a in row[unmap_col].splitlines()]
        padding = [''] * (max_unmapped_count - len(unmap_vals))
        unmap_vals.extend(padding)

        write_row_num += 1

        for num in range(max_mapped_count):
            update_cells.append(gspread.Cell(write_row_num, start_map_col + num, map_vals[num]))
        for num in range(max_unmapped_count):
            update_cells.append(gspread.Cell(write_row_num, start_unmap_col + num, unmap_vals[num]))

    writesheet.update_cells(update_cells)
Example #6
def updateSpreadsheet(rows):  # Updates the entire Google Spreadsheet
    cells = []
    for row_num, row in enumerate(rows):
        for col_num, cell in enumerate(row):
            cells.append(
                gspread.Cell(row_num + 1, col_num + 1, cell))
    spreadsheet.worksheet.update_cells(cells)
Example #7
def get_range(worksheet_data, start_row, start_col, end_row, end_col):
    result = []
    for row_no in range(start_row - 1, end_row):
        row = worksheet_data[row_no] if row_no < len(worksheet_data) else []
        for col_no in range(start_col - 1, end_col):
            result.append(
                gspread.Cell(row_no + 1, col_no + 1,
                             row[col_no] if col_no < len(row) else ""))
    return result
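A hypothetical use of get_range, assuming worksheet is an authorized gspread Worksheet: pull a padded block out of the current values, edit the Cell objects in place, and write them back in one batch.

data = worksheet.get_all_values()
cells = get_range(data, 1, 1, 3, 3)  # 3x3 block, padded with "" where the sheet is short
for cell in cells:
    if cell.value == "":
        cell.value = "n/a"
worksheet.update_cells(cells)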
Example #8
def create_cells(data, header):
    cells = []
    for col_num, column in enumerate(header):
        cells.append(gspread.Cell(1, col_num + 1, column))

    for row_num, row in enumerate(data):
        for col_num, cell in enumerate(row):
            if str(data[row_num][col_num]).lower() == 'true' or str(
                    data[row_num][col_num]).lower() == 'false':
                cells.append(
                    gspread.Cell(row_num + 2, col_num + 1,
                                 str(data[row_num][col_num]).lower()))
            else:
                cells.append(
                    gspread.Cell(row_num + 2, col_num + 1,
                                 str(data[row_num][col_num])))
                #TODO: Formatting of integers is sketchy: if there's a row with a NULL value, integers become decimals.
                #cells.append(gspread.Cell(row_num + 2, col_num + 1, gspread.numericise(str(df[row_num][col_num]))))
    return cells
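A hypothetical call, again assuming worksheet is an authorized gspread Worksheet; the header goes to row 1 and the data rows to rows 2 and below, with booleans lower-cased as strings:

header = ['id', 'name', 'active']
data = [[1, 'Alice', True], [2, 'Bob', False]]
worksheet.update_cells(create_cells(data, header))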
Example #9
    def append_to_sheet(self,
                        spreadsheet_id,
                        table,
                        worksheet=0,
                        user_entered_value=False,
                        **kwargs):
        """
        Append data from a Parsons table to a Google sheet. Note that the table's columns are
        ignored, as we'll be keeping whatever header row already exists in the Google sheet.

        `Args:`
            spreadsheet_id: str
                The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
            table: obj
                Parsons table
            worksheet: str or int
                The index or the title of the worksheet. The index begins with
                0.
            user_entered_value: bool (optional)
                If True, will submit cell values as entered (required for entering formulas).
                Otherwise, values will be entered as strings or numbers only.
        """

        # This is in here to ensure backwards compatibility with previous versions of Parsons.
        if 'sheet_index' in kwargs:
            worksheet = kwargs['sheet_index']
            logger.warning('Argument deprecated. Use worksheet instead.')

        sheet = self._get_worksheet(spreadsheet_id, worksheet)

        # Grab the existing data, so we can figure out where to start adding new data as a batch.
        # TODO Figure out a way to do a batch append without having to read the whole sheet first.
        # Maybe use gspread's low-level batch_update().
        existing_table = self.get_worksheet(spreadsheet_id, worksheet)

        # If the existing sheet is blank, then just overwrite the table.
        if existing_table.num_rows == 0:
            return self.overwrite_sheet(spreadsheet_id, table, worksheet,
                                        user_entered_value)

        cells = []
        for row_num, row in enumerate(table.data):
            for col_num, cell in enumerate(row):
                # Add 2 to allow for the header row, and for google sheets indexing starting at 1
                sheet_row_num = existing_table.num_rows + row_num + 2
                cells.append(
                    gspread.Cell(sheet_row_num, col_num + 1, row[col_num]))

        value_input_option = 'RAW'
        if user_entered_value:
            value_input_option = 'USER_ENTERED'

        # Update the data in one batch
        sheet.update_cells(cells, value_input_option=value_input_option)
        logger.info(f'Appended {table.num_rows} rows to worksheet.')
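Because user_entered_value controls whether formulas are evaluated, appending a row that contains a formula might look like the following sketch; the spreadsheet ID and the google_sheets connector instance are assumptions, not taken from the code above:

from parsons import Table

tbl = Table([{'item': 'apples', 'qty': 3, 'total': '=3*1.5'}])
google_sheets.append_to_sheet('your-spreadsheet-id', tbl,
                              worksheet=0, user_entered_value=True)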
Example #10
def writeToSheet(sheet, header, data):
    if len(data) > 0:
        client.login()
        s = client.open("Indeed v2").worksheet(sheet)
        book = client.open("Indeed v2")
        book.values_clear(sheet + "!A1:U10000")
        data.insert(0, header)
        cells = []
        for row_num, row in enumerate(data):
            for col_num, cell in enumerate(row):
                cells.append(
                    gspread.Cell(row_num + 1, col_num + 1,
                                 data[row_num][col_num]))
        s.update_cells(cells)
Example #11
    def _cell_feed(
            self,
            row=None,
            max_row=None,
            further_rows=False,  # XXX: REFACTOR
            col=None,
            max_col=None,
            further_cols=False,
            return_empty=False):

        # Fetches cell data for a given row, and all following rows if
        # further_rows is True. If no row is given, all cells are returned.
        params = {}
        if row is not None:
            params['min-row'] = str(row)
            if max_row is not None:
                params['max-row'] = str(max_row)
            elif not further_rows:
                params['max-row'] = str(row)

        if col is not None:
            params['min-col'] = str(col)
            if max_col is not None:
                params['max-col'] = str(max_col)
            elif not further_cols:
                params['max-col'] = str(col)

            if (params['min-col'] == params['max-col']
                    and params['min-col'] == '0'):
                return []

        if return_empty:
            params['return-empty'] = "true"

        logger.info("getting cell feed")
        try:
            feed = self.gspread_client.get_cells_feed(self.worksheet,
                                                      params=params)
            # Bit of a hack to rip out Gspread's xml parsing.
            cfeed = [
                gspread.Cell(self, elem)
                for elem in feed.findall(gspread.client._ns('entry'))
            ]
        except Exception as e:
            logger.exception("gspread error. %s", e)
            raise e

        return cfeed
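The XML cell feed above belongs to the legacy Sheets v3 client. In current gspread the same fetch is usually expressed with Worksheet.range; a rough sketch, not the original code, which also ignores the further_rows/further_cols flags:

start = gspread.utils.rowcol_to_a1(row, col)
end = gspread.utils.rowcol_to_a1(max_row or row, max_col or col)
cells = worksheet.range(f"{start}:{end}")  # returns gspread.Cell objects, empty cells included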
Example #12
    def overwrite_sheet(self,
                        spreadsheet_id,
                        table,
                        worksheet=0,
                        user_entered_value=False,
                        **kwargs):
        """
        Replace the data in a Google sheet with a Parsons table, using the table's columns as the
        first row.

        `Args:`
            spreadsheet_id: str
                The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
            table: obj
                Parsons table
            worksheet: str or int
                The index or the title of the worksheet. The index begins with
                0.
            user_entered_value: bool (optional)
                If True, will submit cell values as entered (required for entering formulas).
                Otherwise, values will be entered as strings or numbers only.
        """

        # This is in here to ensure backwards compatibility with previous versions of Parsons.
        if 'sheet_index' in kwargs:
            worksheet = kwargs['sheet_index']
            logger.warning('Argument deprecated. Use worksheet instead.')

        sheet = self._get_worksheet(spreadsheet_id, worksheet)
        sheet.clear()

        value_input_option = 'RAW'
        if user_entered_value:
            value_input_option = 'USER_ENTERED'

        # Add header row
        sheet.append_row(table.columns, value_input_option=value_input_option)

        cells = []
        for row_num, row in enumerate(table.data):
            for col_num, cell in enumerate(row):
                # We start at row #2 to keep room for the header row we added above
                cells.append(
                    gspread.Cell(row_num + 2, col_num + 1, row[col_num]))

        # Update the data in one batch
        sheet.update_cells(cells, value_input_option=value_input_option)
        logger.info('Overwrote worksheet.')
Example #13
def update_private(user, userindex):
    print(user)
    print(user['pk'], user['username'], user['full_name'])
    cells.append(
        gspread.Cell(userindex, 1,
                     '=IMAGE("%s",4,50,50)' % user['profile_pic_url']))
    cells.append(
        gspread.Cell(
            userindex, 2, '=HYPERLINK("https://www.instagram.com/%s/","%s")' %
            (user['username'], user['username'])))
    cells.append(gspread.Cell(userindex, 3, '%s' % user['full_name']))
    cells.append(gspread.Cell(userindex, 4, 'PRIVATE'))
    cells.append(gspread.Cell(userindex, 5, 'PRIVATE'))
    cells.append(gspread.Cell(userindex, 6, 'PRIVATE'))
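update_private only appends to a module-level cells list; the actual write happens in a separate flush step that the snippet does not show. A hypothetical version of that step, assuming a worksheet handle:

if cells:
    # =IMAGE() and =HYPERLINK() formulas only render when submitted as user-entered values
    worksheet.update_cells(cells, value_input_option='USER_ENTERED')
    cells.clear()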
Example #14
    def append_to_sheet(self,
                        spreadsheet_id,
                        table,
                        sheet_index=0,
                        user_entered_value=False):
        """
        Append data from a Parsons table to a Google sheet. Note that the table's columns are
        ignored, as we'll be keeping whatever header row already exists in the Google sheet.

        `Args:`
            spreadsheet_id: str
                The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
            table: obj
                Parsons table
            sheet_index: int (optional)
                The index of the desired worksheet
            user_entered_value: bool (optional)
                If True, will submit cell values as entered (required for entering formulas).
                Otherwise, values will be entered as strings or numbers only.
        """

        sheet = self._get_sheet(spreadsheet_id, sheet_index)

        # Grab the existing data, so we can figure out where to start adding new data as a batch.
        # TODO Figure out a way to do a batch append without having to read the whole sheet first.
        # Maybe use gspread's low-level batch_update().
        existing_table = self.read_sheet(spreadsheet_id, sheet_index)

        cells = []
        for row_num, row in enumerate(table.data):
            for col_num, cell in enumerate(row):
                # Add 2 to allow for the header row, and for google sheets indexing starting at 1
                sheet_row_num = existing_table.num_rows + row_num + 2
                cells.append(
                    gspread.Cell(sheet_row_num, col_num + 1, row[col_num]))

        value_input_option = 'RAW'
        if user_entered_value:
            value_input_option = 'USER_ENTERED'

        # Update the data in one batch
        sheet.update_cells(cells, value_input_option=value_input_option)
Example #15
    def overwrite_sheet(self,
                        spreadsheet_id,
                        table,
                        sheet_index=0,
                        user_entered_value=False):
        """
        Replace the data in a Google sheet with a Parsons table, using the table's columns as the
        first row.

        `Args:`
            spreadsheet_id: str
                The ID of the spreadsheet (Tip: Get this from the spreadsheet URL)
            table: obj
                Parsons table
            sheet_index: int (optional)
                The index of the desired worksheet
            user_entered_value: bool (optional)
                If True, will submit cell values as entered (required for entering formulas).
                Otherwise, values will be entered as strings or numbers only.
        """

        sheet = self._get_sheet(spreadsheet_id, sheet_index)
        sheet.clear()

        value_input_option = 'RAW'
        if user_entered_value:
            value_input_option = 'USER_ENTERED'

        # Add header row
        sheet.append_row(table.columns, value_input_option=value_input_option)

        cells = []
        for row_num, row in enumerate(table.data):
            for col_num, cell in enumerate(row):
                # We start at row #2 to keep room for the header row we added above
                cells.append(
                    gspread.Cell(row_num + 2, col_num + 1, row[col_num]))

        # Update the data in one batch
        sheet.update_cells(cells, value_input_option=value_input_option)
Example #16
def update(user, userindex):
    totalposts = len(igapi.getTotalUserFeed('%d' % user['pk']))
    totalfollowers = len(igapi.getTotalFollowers('%d' % user['pk']))
    totalfollowing = len(igapi.getTotalFollowings('%d' % user['pk']))
    print(user)
    print(user['pk'], user['username'], user['full_name'], totalposts,
          totalfollowers, totalfollowing)
    cells.append(
        gspread.Cell(userindex, 1,
                     '=IMAGE("%s",4,50,50)' % user['profile_pic_url']))
    cells.append(
        gspread.Cell(
            userindex, 2, '=HYPERLINK("https://www.instagram.com/%s/","%s")' %
            (user['username'], user['username'])))
    cells.append(gspread.Cell(userindex, 3, '%s' % user['full_name']))
    cells.append(gspread.Cell(userindex, 4, '%s' % totalposts))
    cells.append(gspread.Cell(userindex, 5, '%s' % totalfollowers))
    cells.append(gspread.Cell(userindex, 6, '%s' % totalfollowing))
Example #17
async def schedule(ctx, game="", date="", time="", name=""):
    #Check if the command was called from someone with a "Gamemaster" role
    server_roles = ctx.message.guild.roles
    gamemaster = None

    for i in server_roles:
        if (i.name == "Gamemaster"):  #If there is a role called "Gamemaster"
            gamemaster = i  #Get the gamemaster role
            print("Gamemaster role available.")

    if gamemaster is None:  #If "Gamemaster" role was not found
        # create embed
        embed_fail = discord.Embed(
            title=
            "Gamemaster role does not exist. Contact server staff to ask them to make this role. (case-sensitive)",
            color=RED)
        embed_fail.set_author(name="Gamemaster not available", icon_url=ERROR)
        # -
        await ctx.message.channel.send(embed=embed_fail)
        return

    #If the message author is in the list of people that have the "Gamemaster" role
    if (ctx.message.author in gamemaster.members):
        #Let them schedule the event
        print("Able to schedule.")
    else:
        # create embed
        embed_fail = discord.Embed(
            title=
            'Only those with the "Gamemaster" role can schedule game nights',
            color=RED)
        embed_fail.set_author(name="Permission denied", icon_url=ERROR)
        # -
        await ctx.message.channel.send(embed=embed_fail)
        return

    #After all that permissions checking, time to catch incomplete information!
    #Game, date and time are absolutely mandatory, while name is optional
    if (game == "" or date == "" or time == ""):
        # create embed
        embed_fail = discord.Embed(
            title=
            "Missing information required for scheduling. See help command for details.",
            color=RED)
        embed_fail.set_author(name="Incomplete command", icon_url=ERROR)
        # -
        await ctx.message.channel.send(embed=embed_fail)
        return

    game = check_default_alias(game, ctx)
    spread = client.open(str(ctx.guild.id))
    sheet = spread.worksheet(game)

    try:

        var_date = None
        var_time = None

        print(str(sheet.cell(2, 2)), str(sheet.cell(2, 3)))
        #Try to find others cell with the same data
        var_date = sheet.find(str(date))
        var_time = sheet.find(str(time))

        if (date == var_date.value):
            print("Date has been found.")
            if (time == var_time.value):
                print("Time has been found.")
                # create embed
                embed_fail = discord.Embed(
                    title="This event is already scheduled for that game.",
                    color=RED)
                embed_fail.set_author(name="Unavailable slot", icon_url=ERROR)
                # -
                await ctx.message.channel.send(embed=embed_fail)
                return
    #No other cells are found
    except gspread.exceptions.CellNotFound:
        print("No dupes found. Proceed to scheduling event.")
    except gspread.exceptions.WorksheetNotFound:
        # create embed
        embed_fail = discord.Embed(
            title=
            "Game does not exist. Make sure arguments are in Game, Date, Time order and try again. If that doesn't work, see the addgame command.",
            color=RED)
        embed_fail.set_author(name="Game not found", icon_url=ERROR)
        # -
        await ctx.message.channel.send(embed=embed_fail)

    print(sys.exc_info()[0])
    """
    Can finally start the actual scheduling code. Yay.
    Searches for each individual element in the specified game,
    if it doesn't exist, create it
    """
    try:
        sheet = spread.worksheet(game)

        neweventid = game[0:2] + game[-2:] + "-" + str(
            randint(10000000, 99999999))
        cell = sheet.find("Player Name")
        for i in range(cell.row,
                       sheet.row_count):  #Looping through all the rows
            cell = sheet.cell(i, cell.col)
            if (cell.value == ""
                ):  #Get the first cell in that column that's blank
                sheet.update_cell(i, cell.col + 1, date)
                time_cell = sheet.update_cell(i, cell.col + 2, time)

                if (name != ""):
                    name_cell = sheet.update_cell(i, cell.col + 2, name)

                sheet.update_cell(
                    sheet.find("Event Id").row + i - 1,
                    sheet.find("Event Id").col, neweventid)
                sheet.update_cell(
                    sheet.find("Num Players").row + i - 1,
                    sheet.find("Num Players").col, 0)

                # create embed
                embed_finish = discord.Embed(
                    title=
                    "New players can join the event using the Event ID.",
                    description="Command to join is {}join [game] [event id]".
                    format(BOT_PREFIX),
                    color=GREEN)
                embed_finish.add_field(name="Event ID",
                                       value=neweventid,
                                       inline=True)
                embed_finish.add_field(name="Game", value=game, inline=True)
                embed_finish.set_author(name="Schedule Success",
                                        icon_url=SUCCESS)
                # -

                await ctx.message.channel.send(embed=embed_finish)
                return

        date_cell = sheet.find("Date")
        time_cell = sheet.find("Time")
        print(date_cell, time_cell)

        cells_changed = []

        for i in range(
                date_cell.row,
                sheet.row_count):  #Looping through all the rows, i is an int
            cell = sheet.cell(i, date_cell.col)
            print(str(cell))

            if (cell.value == ""
                ):  #Get the first cell in the date column that's blank
                #Update cells with Date and Time
                cells_changed.append([(i, cell.col), (i, time_cell.col)])
                print("\n{}".format("cells_changed: {}".format(cells_changed)))

                if (name != ""):
                    name_cell = sheet.find("Name")
                    cells_changed.append((i, name_cell.col))
                    print("\n{}".format(cells_changed))

                cells = []
                for pair, value in cells_changed:
                    new_cell = gspread.Cell(pair[0], pair[1])
                    new_cell.value = str(date)
                    cells.append(new_cell)

                    new_cell = gspread.Cell(value[0], value[1])
                    new_cell.value = str(time)
                    cells.append(new_cell)

                    print(type(pair))
                    print("Pair: {}, Value: ".format(pair))
                    print("Cells: {}".format(cells))

                sheet.update_cells(cells, "RAW")
                # create embed
                embed_info = discord.Embed(
                    title="Scheduled game night successfully!", color=GREEN)
                embed_info.set_author(name="Schedule Success",
                                      icon_url=SUCCESS)
                # -
                await ctx.message.channel.send(embed=embed_info)
                return
    except gspread.exceptions.WorksheetNotFound:
        # create embed
        embed_fail = discord.Embed(
            title=
            "Game does not exist. Make sure arguments are in Game, Date, Time order and try again. If that doesn't work, see the addgame command.",
            color=RED)
        embed_fail.set_author(name="Game not found", icon_url=ERROR)
        # -
        await ctx.message.channel.send(embed=embed_fail)
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--json", required=True)
    parser.add_argument("--sheet", required=True)
    parser.add_argument("--credentials", default="credentials.json")
    parser.add_argument("--start_col", default="1", type=int)
    args = parser.parse_args()

    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        args.credentials, SCOPES)
    gcloud = gspread.authorize(credentials)

    spreadsheet = gcloud.open_by_url(args.sheet)

    with open(args.json) as json_file:
        test_results = json.load(json_file)

    try:
        worksheet = spreadsheet.worksheet(test_results["suite"])
    except gspread.exceptions.WorksheetNotFound:
        print(" * ERROR: Worksheet {} not found".format(test_results["suite"]))
        # could add_worksheet?
        sys.exit(1)

    worksheet_data = worksheet.get_all_values()
    populated_rows = len(worksheet_data)
    if populated_rows == 0:
        # Blank spreadsheet, row 1 will be for column titles
        current_row = 2
    else:
        current_row = populated_rows + 1

    # Columns before start_col reserved for manually entered details
    start_col = max(1, args.start_col)

    # Columns for Filename, URLs Tested, Timestamp, Test Suite
    metadata_cols = 4
    # Columns for counts of Tests, and each Test Status
    state_cols = 1 + len(TEST_STATES)

    # First results column
    results_col = start_col + metadata_cols + state_cols

    # Column after last results
    if populated_rows == 0:
        # Blank spreadsheet
        next_col = results_col
    else:
        next_col = max(results_col, len(worksheet_data[0]) + 1)

    # Test Names
    start_cell_addr = gspread.utils.rowcol_to_a1(1, 1)
    end_cell_addr = gspread.utils.rowcol_to_a1(1, next_col)
    cell_list_names = worksheet.range("{}:{}".format(start_cell_addr,
                                                     end_cell_addr))[:-1]

    # Results
    start_cell_addr = gspread.utils.rowcol_to_a1(current_row, 1)
    end_cell_addr = gspread.utils.rowcol_to_a1(current_row, next_col)
    cell_list_results = worksheet.range("{}:{}".format(start_cell_addr,
                                                       end_cell_addr))[:-1]

    # Columns for Filename, URLs Tested, Timestamp, Test Suite
    current_index = start_col - 1  # list is 0-indexed whereas rows/cols are 1-indexed
    cell_list_names[current_index].value = "Filename"
    cell_list_results[current_index].value = args.json
    current_index += 1
    cell_list_names[current_index].value = "URLs Tested"
    try:
        urls_tested = []
        for endpoint in test_results["endpoints"]:
            urls_tested.append("{}:{} ({})".format(endpoint["host"],
                                                   endpoint["port"],
                                                   endpoint["version"]))
        cell_list_results[current_index].value = ", ".join(urls_tested)
    except Exception:
        print(" * WARNING: JSON file does not include endpoints")
        cell_list_results[current_index].value = test_results["url"]
    current_index += 1
    cell_list_names[current_index].value = "Timestamp"
    cell_list_results[current_index].value = (
        datetime.datetime.utcfromtimestamp(
            test_results["timestamp"]).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] +
        'Z')
    current_index += 1
    cell_list_names[current_index].value = "Test Suite"
    try:
        cell_list_results[current_index].value = "{} ({})".format(
            test_results["suite"], test_results["config"]["VERSION"])
    except Exception:
        print(" * WARNING: JSON file does not include test suite version")
        cell_list_results[current_index].value = test_results["suite"]

    # Columns for counts of Tests and each Test Status
    results_addr = "{}:{}".format(
        gspread.utils.rowcol_to_a1(current_row, results_col),
        gspread.utils.rowcol_to_a1(current_row, 1)[1:])

    current_index += 1
    cell_list_names[current_index].value = "Tests"
    # count non-empty cells on rest of this row
    cell_list_results[current_index].value = "=COUNTIF({}, \"?*\")".format(
        results_addr)
    for state in TEST_STATES:
        current_index += 1
        cell_list_names[current_index].value = state
        # count cells on the rest of this row that match this column's status
        current_col_addr = gspread.utils.rowcol_to_a1(
            1, cell_list_names[current_index].col)
        cell_list_results[current_index].value = "=COUNTIF({}, CONCAT({},\"*\"))" \
                                                 .format(results_addr, current_col_addr)

    # Columns for the Results
    for result in test_results["results"]:
        cell_contents = result["state"]
        if result["detail"] != "":
            cell_contents += " (" + result["detail"] + ")"
        col = next(
            (cell.col
             for cell in cell_list_names if cell.value == result["name"]),
            None)
        if col:
            index = col - 1  # list is 0-indexed whereas rows/cols are 1-indexed
            cell_list_results[index].value = cell_contents
        else:
            # Test name not found, append column (since gspread doesn't make it easy to insert one)
            col = cell_list_names[-1].col + 1  # = cell_list_results[-1].col+1
            cell_list_names.append(gspread.Cell(1, col, result["name"]))
            cell_list_results.append(
                gspread.Cell(current_row, col, cell_contents))

    worksheet.update_cells(cell_list_names)
    # 'USER_ENTERED' allows formulae to be used
    worksheet.update_cells(cell_list_results,
                           value_input_option='USER_ENTERED')
Example #19
def update_worksheet(worksheet, lines_range=(2,20)):
    """
    Reads the lines in range, queries Wikipedia + Wikidata for the (fixed) entity name, and updates the rest of the fields in the row.
    Batch spreadsheet update (instead of row-by-row).
    """

    from read_wiki import fix_entry_name_options
    print("starting to update obudget_wikipedia_categories_table")
    update_obudget_wikipedia_categories_table()      # update the relevant wikipedia categories lists per obudget 'he_kind'
    print("done.")
    
    # define the range in the spreadsheet to update
    start_row = lines_range[0]
    end_row = lines_range[1]
    print(f"start row: {start_row}   end row: {end_row}")

    header = worksheet.row_values(1)   # get the header line keys

    all_cells = []
    summaries = []
    urls = []
    retrieved = {}

    number_of_rows = end_row - start_row

    for row_index in range(start_row, end_row):
        row_values = worksheet.row_values(row_index)
        entity_data = {}

        for cell_index in range(len(header)):
            try:
                cell_value = row_values[cell_index]
            except IndexError:  # short rows: treat missing trailing cells as empty
                cell_value = ''
            cell_type = header[cell_index]
            #print(f"row index {row_index} cell index {cell_index}, cell type {cell_type}")

            if cell_type =='id':
                pass
            elif cell_type =='kind_he':
                if cell_value == '' or cell_value is None:
                    obudget_category = None
                else:
                    obudget_category = cell_value

            elif cell_type =='name':
                name = cell_value
                if name != '' and name is not None:
                    if name in retrieved:                   # check if we already retrieved this entity's details
                        entity_data = retrieved[name]
                        # print(f"{name} details were already available locally")

                    else:
                        #print(f"searching {name}, with obudget category {obudget_category}")
                        entity_data = search_wikipedia(name, obudget_category)
                        # print(f"{name} details retrieved")
                else:
                    entity_data = {}

            else:
                if cell_type in entity_data:
                    cell_value = str(entity_data[cell_type])          # update the cell value with the retrieved data
                    #print(f"{name} {cell_type}: {cell_value}")
                else:
                    #print(f"no {cell_type} value for {name}")
                    cell_value = ''

            try:
                all_cells.append(gspread.Cell(row_index, cell_index+1, cell_value))
            except Exception as e:
                print(f"Error while updating row no. {row_index} column {cell_index} for {name}: {e}")

        if "wiki_title" in entity_data:
            print(f"{row_index}. {name} --> {entity_data['wiki_title']}")
        else:
            print(f"{row_index}. {name} ---> no wiki entry")

    try:
        worksheet.update_cells(all_cells)                        # batch saving on google spreadsheet
        print("updated google spreadsheet")

    except Exception as e:
        print(f"Error trying to update google spreadsheet: {e}")

    return retrieved
Example #20
def gsheets_import(test_results,
                   worksheet,
                   filename,
                   start_col=1,
                   insert=False):
    """Upload results data to spreadsheet"""

    worksheet_data = worksheet.get_all_values()
    populated_rows = len(worksheet_data)
    # Columns before start_col reserved for manually entered details
    start_col = max(1, start_col)

    # Columns for Filename, URLs Tested, Timestamp, Test Suite
    metadata_cols = 4
    # Columns for counts of Tests, and each Test Status
    state_cols = 1 + len(TEST_STATES)

    # First results column
    results_col = start_col + metadata_cols + state_cols

    # Column after last results
    if populated_rows == 0:
        # Blank spreadsheet
        next_col = results_col
    else:
        next_col = max(results_col, len(worksheet_data[0]) + 1)

    # Test Names
    cell_list_names = get_range(worksheet_data, 1, 1, 1, next_col - 1)
    original_cell_list_names = copy.deepcopy(cell_list_names)

    # Results
    cell_list_results = [""] * len(cell_list_names)

    # Columns for Filename, URLs Tested, Timestamp, Test Suite
    current_index = start_col - 1  # list is 0-indexed whereas rows/cols are 1-indexed
    cell_list_names[current_index].value = "Filename"
    cell_list_results[current_index] = filename
    current_index += 1
    cell_list_names[current_index].value = "URLs Tested"
    try:
        urls_tested = []
        for endpoint in test_results["endpoints"]:
            urls_tested.append("{}:{} ({})".format(endpoint["host"],
                                                   endpoint["port"],
                                                   endpoint["version"]))
        cell_list_results[current_index] = ", ".join(urls_tested)
    except Exception:
        print(" * WARNING: JSON file does not include endpoints")
        cell_list_results[current_index] = test_results["url"]
    current_index += 1
    cell_list_names[current_index].value = "Timestamp"
    cell_list_results[current_index] = (datetime.datetime.utcfromtimestamp(
        test_results["timestamp"]).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + 'Z')
    current_index += 1
    cell_list_names[current_index].value = "Test Suite"
    try:
        cell_list_results[current_index] = "{} ({})".format(
            test_results["suite"], test_results["config"]["VERSION"])
    except Exception:
        print(" * WARNING: JSON file does not include test suite version")
        cell_list_results[current_index] = test_results["suite"]

    # Columns for counts of Tests and each Test Status
    result_col_name = gspread.utils.rowcol_to_a1(1, results_col)[0:-1]
    results_addr = 'INDIRECT("${}"&ROW()&":"&ROW())'.format(result_col_name)

    current_index += 1
    cell_list_names[current_index].value = "Tests"
    # count non-empty cells on rest of this row
    cell_list_results[current_index] = "=COUNTIF({}, \"?*\")".format(
        results_addr)
    for state in TEST_STATES:
        current_index += 1
        cell_list_names[current_index].value = state
        # count cells on the rest of this row that match this column's status
        current_col_name = gspread.utils.rowcol_to_a1(
            1, cell_list_names[current_index].col)[0:-1]
        cell_list_results[current_index] = "=COUNTIF({}, CONCAT({}$1,\"*\"))" \
            .format(results_addr, current_col_name)

    # Columns for the Results
    for result in test_results["results"]:
        cell_contents = result["state"]
        if result["detail"] != "":
            cell_contents += " (" + result["detail"] + ")"
        col = next(
            (cell.col
             for cell in cell_list_names if cell.value == result["name"]),
            None)
        if col:
            index = col - 1  # list is 0-indexed whereas rows/cols are 1-indexed
            cell_list_results[index] = cell_contents
        else:
            # Test name not found, append column (since gspread doesn't make it easy to insert one)
            col = cell_list_names[-1].col + 1  # = cell_list_results[-1].col+1
            cell_list_names.append(gspread.Cell(1, col, result["name"]))
            cell_list_results.append(cell_contents)

    if not ranges_equal(original_cell_list_names, cell_list_names):
        worksheet.update_cells(cell_list_names)
    if insert:
        insert_row(worksheet, cell_list_results, 1)
    else:
        append_row(worksheet, cell_list_results)
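As a concrete reading of the row-relative formula built above: with results_col = 10, rowcol_to_a1(1, 10) is "J1", so result_col_name is "J" and the Tests cell ends up holding =COUNTIF(INDIRECT("$J"&ROW()&":"&ROW()), "?*"), which counts the non-empty cells from column J to the end of whichever row the formula finally lands in, so the count stays correct whether the results row is inserted or appended.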
Example #21
    select results.brevet_date, distance, title, rider_names, result from results
    join riders on results.rider_id = riders.rider_id
    join brevets on brevets.brevet_date = results.brevet_date
    order by results.brevet_date""")

    cells = []
    riders = set()

    for row_num, row in enumerate(query, 1):
        col_num = 0
        newbie = 0
        for col_num, cell in enumerate(row, 1):
            if col_num == 4:
                cell = cell.split('|')[-1].split(' ')
                cell = cell[0] + ' ' + cell[1].title()
                if cell in riders:
                    newbie = 0
                else:
                    newbie = 1
                    riders.add(cell)
            cells.append(gspread.Cell(row_num, col_num, cell))
        cells.append(gspread.Cell(row_num, col_num + 1, newbie))

scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret.json', scope)
client = gspread.authorize(creds)
sheet = client.open("vssr").worksheet('brm')
sheet.clear()

sheet.update_cells(cells)
Example #22
    async def order(self, ctx, order, *, comment):
        """
        Places an order with the GreenSwarm jf service. Format: o.order <evepraisal link> your comments go here.
        """
        try:
            scope = [
                'https://www.googleapis.com/auth/spreadsheets',
                'https://spreadsheets.google.com/feeds',
                'https://www.googleapis.com/auth/drive'
            ]
            creds = ServiceAccountCredentials.from_json_keyfile_name(
                '/home/zoro/OtterBot/google_secret.json', scope)
            gc = gspread.authorize(creds)
        except APIError as response:
            print(response)

        try:
            shop = gc.open_by_key(
                '16dYWGvfwNOkx3Pf27rrp7yJyX6D3zOtkmm2uOj3Nnis').sheet1
        except APIError as response:
            print(response)

        def next_available_row(shop):
            row_filter = list(filter(None, shop.col_values(1)))
            row_num = len(row_filter) + 1
            return row_num

        if re.match(r'^.*evepraisal.com\/a\/.*$', order):
            jOrder = order + '.json'
            with urllib.request.urlopen(f'{jOrder}') as url:
                orderData = json.loads(url.read().decode())
                itemData = orderData['items']
                purchaserID = ctx.message.author.display_name
                item_cell = []
                row_num = next_available_row(shop)
                for item in itemData:
                    item_cell.append(gspread.Cell(row_num, 1, item['name']))
                    item_cell.append(gspread.Cell(row_num, 2,
                                                  item['quantity']))
                    item_cell.append(
                        gspread.Cell(row_num, 3, item['prices']['all']['avg']))
                    item_cell.append(gspread.Cell(row_num, 4, purchaserID))
                    item_cell.append(gspread.Cell(row_num, 5, f'{comment}'))
                    row_num += 1
                try:
                    shop.update_cells(item_cell)
                    order_gap = []
                    order_gap.append(
                        gspread.Cell(next_available_row(shop), 1, '-'))
                    shop.update_cells(order_gap)
                except Exception as err:
                    print(err)
            return await ctx.message.add_reaction(chr(0x1f44d))
        elif re.match(r'^.*zkillboard.com\/kill\/.*$', order):
            match = re.findall(r'\d\d\d\d\d\d\d\d', order)
            zkillID = match[0]
            jkOrder = f'https://zkillboard.com/api/killID/{zkillID}/'
            with urllib.request.urlopen(f'{jkOrder}') as url:
                orderData = json.loads(url.read().decode())
                ekillID = orderData[0]['killmail_id']
                ekillHash = orderData[0]['zkb']['hash']

                try:
                    kill_op = self.bot.esi.esi.get_operation(
                        'get_killmails_killmail_id_killmail_hash')
                    killsheet = kill_op.json(killmail_id=ekillID,
                                             killmail_hash=ekillHash)
                except Exception as e:
                    print(e)
                kill_items = killsheet['victim']['items']
                ship = killsheet['victim']['ship_type_id']
                item_cell = []
                row_num = next_available_row(shop)
                for item_type_id in kill_items:
                    print(item_type_id)

            return await ctx.message.add_reaction(chr(0x1f44d))
        else:
            return await ctx.send('Order must be an Evepraisal or Zkill link.')

Example #23
if __name__ == '__main__':

    # The previous version used gspread's import_csv function, but that conflicted with Google Studio: it seems to delete the spreadsheet's sheets and create new ones each time, breaking links internal to Google Studio. We use a batch-cells approach now, which works. Reminder of the old method below:
    # content = open(CSV_FILENAME, 'r').read().encode('utf-8')
    # gc.import_csv(SPREADSHEET_ID, content)

    # load csv file
    df = pd.read_csv(CSV_FILENAME)

    cells = []
    # https://github.com/burnash/gspread/issues/515
    for n_column, header in enumerate(df.keys()):
        # first row is header
        cells.append(gspread.Cell(1, n_column + 1, header))
        for n_row, value  in enumerate(df[header]):

            if pd.isna(value):
                value = ''
            else:
                value = str(value)

            cells.append(gspread.Cell(n_row + 2, n_column + 1, value))

    # In theory the dataset can only grow with the current pipeline, so we do not bother inspecting the spreadsheet first; we just overwrite.

    scope = ['https://spreadsheets.google.com/feeds',
             'https://www.googleapis.com/auth/drive']

    credentials = ServiceAccountCredentials.from_json_keyfile_name(CREDENTIALS_FILENAME, scope)
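The excerpt stops after building the credentials. Under the same variable names, the remaining flush presumably looks something like this sketch (SPREADSHEET_ID comes from the configuration the excerpt omits):

    gc = gspread.authorize(credentials)
    worksheet = gc.open_by_key(SPREADSHEET_ID).sheet1
    worksheet.clear()
    worksheet.update_cells(cells)  # value_input_option defaults to 'RAW'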
Example #24
def gsheets_import(test_results, worksheet, filename, start_col=1):
    """Upload results data to spreadsheet"""

    worksheet_data = worksheet.get_all_values()
    populated_rows = len(worksheet_data)
    if populated_rows == 0:
        # Blank spreadsheet, row 1 will be for column titles
        current_row = 2
    else:
        current_row = populated_rows + 1

    # Columns before start_col reserved for manually entered details
    start_col = max(1, start_col)

    # Columns for Filename, URLs Tested, Timestamp, Test Suite
    metadata_cols = 4
    # Columns for counts of Tests, and each Test Status
    state_cols = 1 + len(TEST_STATES)

    # First results column
    results_col = start_col + metadata_cols + state_cols

    # Column after last results
    if populated_rows == 0:
        # Blank spreadsheet
        next_col = results_col
    else:
        next_col = max(results_col, len(worksheet_data[0]) + 1)

    # Test Names
    start_cell_addr = gspread.utils.rowcol_to_a1(1, 1)
    end_cell_addr = gspread.utils.rowcol_to_a1(1, next_col)
    cell_list_names = worksheet.range("{}:{}".format(start_cell_addr,
                                                     end_cell_addr))[:-1]

    # Results
    start_cell_addr = gspread.utils.rowcol_to_a1(current_row, 1)
    end_cell_addr = gspread.utils.rowcol_to_a1(current_row, next_col)
    cell_list_results = worksheet.range("{}:{}".format(start_cell_addr,
                                                       end_cell_addr))[:-1]

    # Columns for Filename, URLs Tested, Timestamp, Test Suite
    current_index = start_col - 1  # list is 0-indexed whereas rows/cols are 1-indexed
    cell_list_names[current_index].value = "Filename"
    cell_list_results[current_index].value = filename
    current_index += 1
    cell_list_names[current_index].value = "URLs Tested"
    try:
        urls_tested = []
        for endpoint in test_results["endpoints"]:
            urls_tested.append("{}:{} ({})".format(endpoint["host"],
                                                   endpoint["port"],
                                                   endpoint["version"]))
        cell_list_results[current_index].value = ", ".join(urls_tested)
    except Exception:
        print(" * WARNING: JSON file does not include endpoints")
        cell_list_results[current_index].value = test_results["url"]
    current_index += 1
    cell_list_names[current_index].value = "Timestamp"
    cell_list_results[current_index].value = (
        datetime.datetime.utcfromtimestamp(
            test_results["timestamp"]).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] +
        'Z')
    current_index += 1
    cell_list_names[current_index].value = "Test Suite"
    try:
        cell_list_results[current_index].value = "{} ({})".format(
            test_results["suite"], test_results["config"]["VERSION"])
    except Exception:
        print(" * WARNING: JSON file does not include test suite version")
        cell_list_results[current_index].value = test_results["suite"]

    # Columns for counts of Tests and each Test Status
    results_addr = "{}:{}".format(
        gspread.utils.rowcol_to_a1(current_row, results_col),
        gspread.utils.rowcol_to_a1(current_row, 1)[1:])

    current_index += 1
    cell_list_names[current_index].value = "Tests"
    # count non-empty cells on rest of this row
    cell_list_results[current_index].value = "=COUNTIF({}, \"?*\")".format(
        results_addr)
    for state in TEST_STATES:
        current_index += 1
        cell_list_names[current_index].value = state
        # count cells on the rest of this row that match this column's status
        current_col_addr = gspread.utils.rowcol_to_a1(
            1, cell_list_names[current_index].col)
        cell_list_results[current_index].value = "=COUNTIF({}, CONCAT({},\"*\"))" \
                                                 .format(results_addr, current_col_addr)

    # Columns for the Results
    for result in test_results["results"]:
        cell_contents = result["state"]
        if result["detail"] != "":
            cell_contents += " (" + result["detail"] + ")"
        col = next(
            (cell.col
             for cell in cell_list_names if cell.value == result["name"]),
            None)
        if col:
            index = col - 1  # list is 0-indexed whereas rows/cols are 1-indexed
            cell_list_results[index].value = cell_contents
        else:
            # Test name not found, append column (since gspread doesn't make it easy to insert one)
            col = cell_list_names[-1].col + 1  # = cell_list_results[-1].col+1
            cell_list_names.append(gspread.Cell(1, col, result["name"]))
            cell_list_results.append(
                gspread.Cell(current_row, col, cell_contents))

    worksheet.update_cells(cell_list_names)
    # 'USER_ENTERED' allows formulae to be used
    worksheet.update_cells(cell_list_results,
                           value_input_option='USER_ENTERED')
Example #25
def to_cells(rows):
    cells = ([
        gspread.Cell(row=i, col=j, value=v) for j, v in enumerate(row, 1)
    ] for i, row in enumerate(rows, 1))
    return [cell for row in cells for cell in row]  # flatten
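to_cells numbers rows and columns from 1, so the output of Worksheet.get_all_values can be fed straight back to it; a short round-trip sketch, assuming worksheet is an authorized gspread Worksheet:

rows = worksheet.get_all_values()
worksheet.update_cells(to_cells(rows))  # rewrites the current contents in a single batch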