Example #1
    async def fetch(id: str, channel: TextChannel) -> List[Game]:
        url = f"https://docs.google.com/spreadsheets/d/{id}"
        try:
            sheets_fetcher = Sheets.from_files(
                "client_secrets.json",
                "oath_cache.json",
            )
            sheets = sheets_fetcher.get(url)
            games: List[Game] = []
            # Must be the first sheet
            # Skip headers
            for row in sheets._sheets[0]._values[1:]:
                try:
                    games.append(
                        Game(
                            datetime.strptime(row[4], "%m/%d/%Y"),
                            Team(json.loads(row[0]), row[1]),
                            Team(json.loads(row[2]), row[3]),
                        ))
                except BaseException as exception:
                    await handle(None, exception)

            return games
        except BaseException as exception:
            await handle(None, exception)
            raise UsageException.game_sheet_not_loaded(channel, url)
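For contrast, a minimal sketch of the same read using gsheets' public API rather than the private _sheets/_values attributes (URL placeholder, same credential files as above):

from gsheets import Sheets

sheets = Sheets.from_files("client_secrets.json", "oath_cache.json")
spreadsheet = sheets.get("https://docs.google.com/spreadsheets/d/<id>")
worksheet = spreadsheet.sheets[0]   # must be the first sheet
for row in worksheet.values()[1:]:  # skip headers
    print(row)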
Example #2
def get_tracked_assets_and_contacts():
    sheets = Sheets.from_files("credentials.json")
    sheet = sheets[SHEET_NAME]
    thresholds = sheet.sheets[0]
    contacts = sheet.sheets[1]

    raw_thresholds = thresholds.values(False)[1:]
    thresholds = list()
    for thr in raw_thresholds:
        try:
            thresholds.append(AlertObject(*thr))
        except TypeError:
            logger.warning("A threshold row had insufficient information")

    raw_contacts = contacts.values(False)[1:]
    contacts = list()
    for cont in raw_contacts:
        try:
            contacts.append(Contact(*cont))
        except TypeError:
            logger.warning("A contact row had insufficient information")

    return thresholds, contacts
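Both loops above rely on a TypeError from argument unpacking to skip short rows; a toy sketch with a stand-in AlertObject (the real class is not shown in this excerpt):

from dataclasses import dataclass

@dataclass
class AlertObject:  # assumed shape, for illustration only
    asset: str
    threshold: str

AlertObject(*["BTC", "40000"])  # two columns -> ok
try:
    AlertObject(*["BTC"])       # short row -> TypeError, as caught above
except TypeError:
    print("row skipped")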
Example #3
    async def fetch(channel: TextChannel):
        try:
            url = GameData.__get_url(channel)
        except BaseException as exception:
            await handle(None, exception)
            raise UsageException.directive_missing(channel, "games")

        try:
            sheets_fetcher = Sheets.from_files(
                "client_secrets.json",
                "oath_cache.json",
            )
            sheets = sheets_fetcher.get(url)
            games: List[Game] = []
            # Must be the first sheet
            # Skip headers
            for row in sheets._sheets[0]._values[1:]:
                try:
                    games.append(
                        Game(
                            datetime.strptime(row[4], "%m/%d/%Y"),
                            Team(json.loads(row[0]), row[1]),
                            Team(json.loads(row[2]), row[3]),
                        ))
                except BaseException as exception:
                    await handle(None, exception)

            return GameData(games)
        except BaseException as exception:
            await handle(None, exception)
            raise UsageException.game_sheet_not_loaded(channel, url)
Example #4
def download_and_update_from_google_sheets():
    sheets = Sheets.from_files('~/client_secrets.json', '~/storage.json')
    url = 'https://docs.google.com/spreadsheets/d/idnumberhere'
    s = sheets.get(url)
    s.sheets[2].to_csv(
        'C:/Users/BReyes/Desktop/Bryans_Folder/Game/Ad Revenue/AdDailyRevenue2019.csv',
        encoding='utf-8',
        dialect='excel')

    xl_app = win32com.client.DispatchEx("Excel.Application")
    wb = xl_app.workbooks.open(
        'C:/Users/BReyes/Desktop/Bryans_Folder/Analytics/Flash Reports/Current/Neopets Flash Report.xlsx'
    )
    wb.RefreshAll()
    ws = wb.Worksheets[0]
    ws.Visible = 1
    print_area = 'A1:K42'
    ws.PageSetup.Zoom = False
    ws.PageSetup.FitToPagesTall = 1
    ws.PageSetup.FitToPagesWide = 1
    ws.PageSetup.PrintArea = print_area
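    # pause before saving, presumably so RefreshAll's background refreshes can finish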
    time.sleep(15)
    wb.Save()
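    # 0 = xlTypePDF (Excel's XlFixedFormatType): export the worksheet as a PDF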
    ws.ExportAsFixedFormat(
        0,
        'C:/Users/BReyes/Desktop/Bryans_Folder/Analytics/Flash Reports/Current/Flash Report.pdf'
    )
    xl_app.Quit()

    print(
        'Google sheets file downloaded and saved! Flash report has been updated.'
    )
Example #5
def fetch_sheet_data(token_file, spreadsheet_id):
    """
     We fetch the google sheets given spreadsheet id and sheet id and then dump it into csv
     https://docs.google.com/spreadsheets/d/spreadsheetId/edit#gid=sheet_id

     Enable google drive API and Google Sheets API in google developer console. Check these APIs in OAuth consent screen
    """
    sheets_object = Sheets.from_files(token_file, "storages.json")
    sheets = sheets_object[spreadsheet_id]
    return sheets
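A possible use of fetch_sheet_data, dumping the first worksheet to CSV as the docstring describes (file name and id are placeholders):

spreadsheet = fetch_sheet_data("token.json", "spreadsheetId")
spreadsheet.sheets[0].to_csv("dump.csv", encoding="utf-8", dialect="excel")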
Example #6
def get_sheet():
    sheet = Sheets.from_files(secrets=PATH + '/client_secret-sheet.json', storage=PATH + '/storage.json')
    url = 'https://docs.google.com/spreadsheets/d/1oS7en09vBpXiwJjs9zto16US4tfU_bLxdy5EHk32Rsc/edit'
    # url = 'https://docs.google.com/document/d/1YBB75VQWERNKhvT-A9vUh0I-r_ZLPukAnVeXnj2e5SU/edit'
    s = sheet.get(url)
    sheet_list = []
    for i in s:
        sheet_list.append(i)
    print(sheet_list[0])
    sheet_list[0].to_csv(PATH + '/app_config_sicker2.csv')
Example #7
    def sync(self):
        sheets = Sheets.from_files(self._config["gsheets_secrets"],
                                   self._config["gsheets_storage"])
        s = sheets.get(self._config["url"])
        path = os.path.abspath(self._config["path"])

        for f in glob.glob(os.path.join(path, "*.csv")):
            os.remove(f)

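        # one CSV per worksheet; %(sheet)s in the template is replaced with each worksheet's title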
        s.to_csv(make_filename=os.path.join(path, "%(sheet)s.csv"))
        return [os.path.join(path, f"{i}.csv") for i in s.sheets.titles()]
Example #8
def update(
    samples,
    samplesetname,
    stype,
    bucket,
    refworkspace,
    name_col="index",
    values=['legacy_bam_filepath', 'legacy_bai_filepath'],
    filetypes=['bam', 'bai'],
    my_id='~/.client_secret.json',
    mystorage_id="~/.storage.json",
    creds='../.credentials.json',
    sampletrackername='ccle sample tracker',
    refsheet_url="https://docs.google.com/spreadsheets/d/1Pgb5fIClGnErEqzxpU7qqX6ULpGTDjvzWwDN8XUJKIY",
):

    # uploading to our bucket (now a new function)
    terra.changeToBucket(samples,
                         bucket,
                         name_col=name_col,
                         values=values,
                         filetypes=filetypes,
                         catchdup=True,
                         test=False)

    sheets = Sheets.from_files(my_id, mystorage_id)
    ccle_refsamples = sheets.get(refsheet_url).sheets[0].to_frame(index_col=0)

    names = []
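    # version = rows already tracked with this arxspan_id, plus this id's occurrences so far in the batch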
    subccle_refsamples = ccle_refsamples[ccle_refsamples['datatype'] == stype]
    for k, val in samples.iterrows():
        val = val["arxspan_id"]
        names.append(val)
        samples.loc[k, 'version'] = len(subccle_refsamples[
            subccle_refsamples['arxspan_id'] == val]) + names.count(val)
    samples['version'] = samples['version'].astype(int)

    ccle_refsamples = ccle_refsamples.append(samples, sort=False)
    dfToSheet(ccle_refsamples, sampletrackername, secret=creds)

    #uploading new samples to mut
    refwm = dm.WorkspaceManager(refworkspace).disable_hound()
    refwm.upload_samples(samples)
    sam = refwm.get_samples()

    #creating a sample set
    refwm.update_sample_set(sample_set_id=samplesetname,
                            sample_ids=samples.index)
    refwm.update_sample_set(
        sample_set_id='all',
        sample_ids=[i for i in sam.index.tolist() if i != 'nan'])
Example #9
def pull_from_sheets(url, new_ratings_path):
    sheets = Sheets.from_files('~/client_secrets.json', '~/storage.json')
    s = sheets.get(url)
    p = s.sheets[0]
    df = p.to_frame()
    last_row = df.iloc[-1]
    new_df = pd.DataFrame(columns=['username', 'recipe_id', 'rating'])
    username = last_row['username']
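    # rating columns are assumed to be named like 'x[recipe_id]'; col[2:-1] slices out the id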
    for i, col in enumerate(df.columns):
        if col[1] == '[':
            new_df.loc[i] = [username, col[2:-1], last_row[col]]
    new_df.to_csv(new_ratings_path)  # encoding='utf-8', dialect='excel')
    return 1
Example #10
def download_csv():  # method to fetch and download spreadsheet
    # get sheets using credentials
    sheets = Sheets.from_files(r'~\Desktop\OFB_Data\client_secrets.json',
                               r'~\Desktop\OFB_Data\storage.json')

    # set url to url of spreadsheet that contains survey entries
    url = 'https://docs.google.com/spreadsheets/d/1TgNM_HA6y2IMD6vEjxNqFzaUmfRtTMqsiHM7jK-16Kc/edit?usp=sharing'

    # get sheet
    s = sheets.get(url)

    # dump first sheet in spreadsheet into csv file
    s.sheets[0].to_csv('ofb_data.csv', encoding='utf-8', dialect='excel')
Example #11
def compareToCuratedGS(url,
                       sample,
                       samplesetname,
                       sample_id='DepMap ID',
                       clientsecret='~/.client_secret.json',
                       storagepath='~/.storage.json',
                       colname='CN New to internal',
                       value='no data yet'):
    """
  from a google spreadsheet, will check that we have all of the samples we should have in our sample
  set name (will parse NAME_additional for sample_id)

  Args:
  -----
    url: str the url of the gsheet
    sample: list(str) the samples to check
    samplesetname: str the name of the sampleset in the googlesheet
    sample_id: str the name of the sample_id column in the google sheet
    clientsecret: str path to your secret google api account file
    storagepath: str path to your secret google api storage file
    colname: str name of the column used to exclude rows whose value equals `value`
    value: str rows with this value in `colname` are excluded

  @gmiller
  """
    sheets = Sheets.from_files(clientsecret, storagepath)
    # Cell Line Profiling Status google sheet
    gsheet = sheets.get(url).sheets[0].to_frame()
    gsheet.index = gsheet[sample_id]
    new_cn = gsheet[gsheet[colname] == samplesetname + 'tent']
    if colname and value:
        data_not_ready_cn = gsheet[gsheet[colname] == value]
        print(data_not_ready_cn)
    # these are the "new" samples discovered by our function, createDatasetsFromNewCellLines
    sample_ids = [id.split('_')[0] for id in sample]
    print("We found data for " + str(len(sorted(sample))) + " samples.\n")

    print(
        "Sanity check: since we have the tacked-on number, we should only have 1 each per sample ID:\n"
    )
    print(Counter(sample))

    in_sheet_not_found = set(new_cn.index.tolist()) - set(sample_ids)
    if len(in_sheet_not_found) > 0:
        print("We have not found " + str(len(in_sheet_not_found)) +
              " of the samples we're supposed to \
      have this release:\n" + str(sorted(list(in_sheet_not_found))))
    else:
        print(
            "We aren't missing any samples that we're supposed to have this release!"
        )
Example #12
def get_expected_lines(sheets_url, merge_ibm_dmc=True):
    sheets_obj = Sheets.from_files('~/.client_secret.json', '~/.storage.json')
    sheets = sheets_obj.get(sheets_url).sheets
    release = sheets[0].to_frame(header=0, index_col=None)
    release.columns = release.columns.str.lower()
    if merge_ibm_dmc:
        cols = release.columns
        ibm = pd.DataFrame(set(release[['dmc', 'ibm']].stack().values),
                           columns=['ibm'])
        # ibm = pd.concat([release['dmc'], release['ibm']]
        #         ).dropna().drop_duplicates().to_frame('ibm')
        release = pd.concat([release[['internal', 'dmc', 'public']], ibm],
                            axis=1)
        release = release[cols]  # restore the original column order
    return release
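A toy illustration of the stack-and-dedupe step above (made-up two-column data):

import pandas as pd

df = pd.DataFrame({"dmc": ["a", "b"], "ibm": ["b", "c"]})
ibm = pd.DataFrame(sorted(set(df[["dmc", "ibm"]].stack().values)),
                   columns=["ibm"])
print(ibm)  # one column holding the deduplicated union: a, b, c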
Example #13
def get_saco():
    '''
	SACO is recorded in a separate Google Sheet as of 201905
	'''
    sheets = Sheets.from_files('./client_secret.json', './storage.json')
    fileId = saco_sheet
    url = 'https://docs.google.com/spreadsheets/d/' + fileId
    s = sheets.get(url)
    sheet_index = int(thisrun[-2:]) - 1  # 0-based sheet index corresponds to the month

    s.sheets[sheet_index].to_csv(sacocsv, encoding='utf-8', dialect='excel')

    msg = 'SACO Google Sheet for %s saved to csv' % thisrun
    print(msg)
    logging.info(msg)
Example #14
def UpdateTables():
    global GoogleDriveWorkbookName
    global ClassesWorksheetName
    global RacesWorksheetName
    global StatPriorityWorksheetName
    global PointBuyOptionsWorksheetName 
    global StatBonusesWorksheetName
    global CRtoXPWorksheetName
    global LevelToXPWorksheetName 

    global ClassTable
    global RaceTable 
    global StatPriorityTable
    global PointBuyOptionsTable
    global StatBonusTable
    global CRtoXPTable
    global LevelToXPTable

    sheets = Sheets.from_files("./credentials.json", "./storage.json")
    book = sheets.find(GoogleDriveWorkbookName)

    # Classes table
    classSheet = book.find(ClassesWorksheetName)
    PopulateTable(classSheet, ClassTable)
    
    # Race table
    raceSheet = book.find(RacesWorksheetName)
    PopulateTable(raceSheet, RaceTable)

    # Stat priority table
    spSheet = book.find(StatPriorityWorksheetName)
    PopulateTable(spSheet, StatPriorityTable)

    # Point-buy options table
    pbSheet = book.find(PointBuyOptionsWorksheetName)
    PopulateTable(pbSheet, PointBuyOptionsTable)

    # Stat bonuses table
    sbSheet = book.find(StatBonusesWorksheetName)
    PopulateTable(sbSheet, StatBonusTable)

    # CR to XP table
    crSheet = book.find(CRtoXPWorksheetName)
    PopulateTable(crSheet, CRtoXPTable)

    # Levels to XP table
    levelSheet = book.find(LevelToXPWorksheetName)
    PopulateTable(levelSheet, LevelToXPTable)
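PopulateTable is not shown in this excerpt; a plausible sketch, assuming each worksheet has a header row, a key in its first column, and a dict as the target table:

def PopulateTable(sheet, table):
    # hypothetical implementation, for illustration only
    table.clear()
    for row in sheet.values()[1:]:  # skip the header row
        table[row[0]] = row[1:]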
Example #15
def get_nafprod():
    '''
	This is the replacement for get_naco()
	'''
    sheets = Sheets.from_files('./client_secret.json', './storage.json')
    fileId = nafprod_sheet
    url = 'https://docs.google.com/spreadsheets/d/' + fileId
    s = sheets.get(url)
    sheet_index = int(thisrun[-2:]) - 1  # sheet index (0 based) should equal month

    s.sheets[sheet_index].to_csv(nafcsv, encoding='utf-8', dialect='excel')

    msg = 'NAFProduction Google Sheet for %s saved to csv' % thisrun
    print(msg)
    logging.info(msg)
Example #16
def main():
    # print("Checking environment variable TIMESHEET_URL for spreadsheet URL...")
    timesheet_url = os.environ.get('TIMESHEET_URL', "").strip()
    if not timesheet_url:
        raise Exception("Please set the TIMESHEET_URL environment variable accordingly.")
    # print("Checking environment variable USER_FULL_NAME for spreadsheet URL...")
    user_full_name = os.environ.get('USER_FULL_NAME', "").strip()
    if not user_full_name:
        print("Warning: USER_FULL_NAME environment variable not set!")
        user_full_name = "Herman Toothrot"

    print("")
    print("Usage:   python timesheet.py [command|date] [date]")
    print("Example: python timesheet.py stats 202011")
    print("Example: python timesheet.py 20201130")
    print("")
    print("Available commands:")
    print("- stats: show summed up hours and minutes for the given/current month")
    print("         use \"CSV=1 python timesheet.py stats\" to format the output")
    print("         as CSV")
    print("- daily: same as stats, except ready to email to HR")
    print("- csv: task breakdown for the month and time spend on each task")
    print("")
    print("""Tip: use "DEBUG=1 timesheet <parameter>" to enable debug output""")
    print("")

    print("Trying to load client-secrets.json file ...")
    secrets_file, cache_file = get_client_secret_filenames()
    sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False)
    print("Success.")

    date = None if len(sys.argv) < 3 else sys.argv[2].strip()
    arg = "read today" if len(sys.argv) < 2 else sys.argv[1].strip()

    if arg == "stats":
        calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
    elif arg == "daily":
        calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
    elif arg == "csv":
        export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
    else:
        date_to_use = "read today" if arg == '' else arg
        load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name)

    print("Done.")
Example #17
def download_networks():
    """
    This function downloads curated network files (edges and nodes) from spreadsheets in Google Drive as csv files.
    """

    # create dir: curation/data
    data_path = os.getcwd() + '/curation/data/' + version
    if not os.path.isdir(data_path): os.makedirs(data_path)

    # download to dir edges and nodes
    ###
    # url-sheet1=https://docs.google.com/spreadsheets/d/1ZF0cLyAN2_LPXbWNVss2yg2de7fvmHi21Zs7r1vFP54/edit#gid=0
    # url-sheet2=https://docs.google.com/spreadsheets/d/1ZF0cLyAN2_LPXbWNVss2yg2de7fvmHi21Zs7r1vFP54/edit#gid=841865829
    # INSTALL: https://pypi.python.org/pypi/gsheets/0.3
    # https://pypi.python.org/pypi/pygsheets (more mature but under dev)
    # https://developers.google.com/sheets/api/quickstart/python
    ###
    # access to my drive files
    print(
        '\nConnecting Google Drive to start the download process of the curated networks..\n'
    )
    driveFilesObject = Sheets.from_files('/home/nuria/client_secrets.json',
                                         '/home/nuria/storage.json')

    # create a list for curated network files spreadsheets (with Google Drive API?)
    spreadsheetsIds_dct = {
        '1pS3rT1hFShsCalu9bLGpAjQWp4y3_mgDxCtcHMAk2I8': 'ngly1_deficiency',
        '1z4PrO8AuNqyAOY3UMYxaMQ1JUcL5z-IbWsw54yQ8sYA': 'ngly1_human',
        '1thcKRGY1TnXepI8BJ6MYDhOgCayt3gEdPkOkFO4Nd4M': 'aqp1_human',
        '17kpta304URAxgd0NN4Uvyyu_qC1n2KObOrNAkFCQzRU': 'aqp1_mouse',
        '1ZF0cLyAN2_LPXbWNVss2yg2de7fvmHi21Zs7r1vFP54': 'glcnac_human',
        '17jXa5f_B74JaT8yuhExRNIozDRRNnPdcfM7yiV4BRtk': 'enns_2014',
        '1ZCfOdYtXn2mda2ybov8ibk0aXOtIBYDNw0RIoqkHldk': 'lam_2016'
    }
    spreadsheetIds_list = spreadsheetsIds_dct.keys()

    # download curated network files (edges and nodes)
    csv_name = lambda dct: '%s/%s.csv' % (data_path, dct.get('sheet'))
    #csv_name = lambda dct: '%s/%s-%s.csv' % (data_path, dct.get('title'), dct.get('sheet'))
    for sp_id in spreadsheetIds_list:
        driveFilesObject[sp_id].to_csv(make_filename=csv_name)

    return print('\nDownload process finished.\nFiles located at: {}.'.format(
        data_path))
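For reference, gsheets also accepts a template string for make_filename instead of a callable, so the loop body above could equivalently be:

        driveFilesObject[sp_id].to_csv(make_filename=data_path + '/%(sheet)s.csv')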
Example #18
def build_dict_from_sheet():
    sheets = Sheets.from_files('credential.json', 'storage.json')
    url = 'https://docs.google.com/spreadsheets/d/1hg9505a4hmp93pssg3hnZkPBSEhoR0X2w3i_MApocZ8/edit#gid=1621532970'
    sheet = sheets.get(url)
    data = sheet.sheets[0].to_frame()
    data = data.rename(columns={'Every class you have taken: Format-> Course_Name(Grade/TA)':'Courses'})
    data = data[['Name', 'Courses']]
    for index, row in data.iterrows():
        courses = cleanup_data(row['Courses'])
        for c in courses:
            # print(c.split('('))
            course_name, grade = c.split('(')
            if (course_name, row['Name'].lower()) not in added_set:
                added_set.add((course_name, row['Name'].lower()))
                is_ta = grade[:-1].lower() == 'ta'
                grade = grade[:-1] if not is_ta else 'A'
                course_directory[course_name].append({'Name' : row['Name'], 'grade' : grade, 'ta' : is_ta})
        print(course_directory)
Example #19
def download_onlinesave():
    '''
	Download onlinesave for local parsing.
	'''
    sheets = Sheets.from_files(conf_dir + 'client_secret.json',
                               conf_dir + 'storage.json')
    fileId = online_save_id
    url = 'https://docs.google.com/spreadsheets/d/' + fileId

    s = sheets.get(url)
    sheet_index = 0

    oscsv = 'OnlineSave%s.csv' % this_year

    s.sheets[sheet_index].to_csv(oscsv, encoding='utf-8', dialect='excel')

    msg = '= OnlineSave Google Sheet saved to csv'
    if verbose:
        print(msg)
    logging.info(msg)
Example #20
def convert_to_json():
    global sheet, sheet_id
    sheets = Sheets.from_files('./google_api_client_keys/credentials.json')
    sheet = sheets[sheet_id]

    companies = []
    for ws in sheet:
        for row in range(1, ws.nrows):  # skip the header row
            companies_sub = {
                "Name": ws.at(row, 1),
                "Location": ws.at(row, 0)
            }
            companies.append(companies_sub)

    # deduplicate once, after all worksheets have been read
    unique_keys = []
    for i in range(0, len(companies)):
        if companies[i] not in companies[i+1:]:
            unique_keys.append(companies[i])
    comp = json.dumps(unique_keys)

    with open('./static/assets/companies.json', 'w') as fh:
        fh.write(comp)
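The quadratic membership test above can be replaced with a set of (Name, Location) tuples; a sketch assuming the same row layout (keeps the first occurrence instead of the last, same unique set):

seen = set()
unique_keys = []
for company in companies:
    key = (company["Name"], company["Location"])
    if key not in seen:  # O(1) lookup instead of scanning the remaining list
        seen.add(key)
        unique_keys.append(company)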
Example #21
    def __init__(self,
                 url,
                 sh_name='Form Responses 1',
                 credentials='from_drive_to_xlsx/config/credentials.json',
                 storage_json='from_drive_to_xlsx/config/storage.json'):
        """
        Parameters
        ----------
        url: string
            URL to access the workbook
        sh_name: string, default 'Form Responses 1'
            Name of the worksheet with all projects
        credentials: string, default 'from_drive_to_xlsx/config/credentials.json'
            Path to access the JSON credentials file
        storage_json: string, default 'from_drive_to_xlsx/config/storage.json'
            Path to access the JSON storage file
        """
        workbook = Sheets.from_files(credentials, storage_json).get(url)
        self.wb = workbook

        self.sh_name = sh_name
        self.ws = workbook.find(sh_name)
Example #22
from gsheets import Sheets
from web3 import Web3, HTTPProvider, IPCProvider

web3 = Web3(HTTPProvider('http://localhost:8545'))

sheets = Sheets.from_files('./client_secret.json', './storage.json')
s = sheets['1xKi8R1E1tlMRwxkuFxtj5o_6-0jA7tAcAkFvAGLai7E']

q_id = s.sheets[0]['H2']
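The same cell can also be addressed by zero-based indices with WorkSheet.at(), as used in Example #20 ('H2' is row 1, column 7):

q_id_alt = s.sheets[0].at(row=1, col=7)
assert q_id_alt == q_id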
Example #23
def shareCCLEbams(users, samples, groups=[], raise_error=True, arg_max_length=100000, bamcols=["internal_bam_filepath", "internal_bai_filepath"],
                  refsheet_url="https://docs.google.com/spreadsheets/d/1XkZypRuOEXzNLxVk9EOHeWRE98Z8_DBvL4PovyM01FE",
                  privacy_sheeturl="https://docs.google.com/spreadsheets/d/115TUgA1t_mD32SnWAGpW9OKmJ2W5WYAOs3SuSdedpX4"):
  """
  same as shareTerraBams but adapted to work with CCLE bams from the CCLE sample tracker

  You need to have gsheets installed and your '~/.client_secret.json' and '~/.storage.json' set up

  Args:
  ----
    users: list[str] of users' google accounts
    groups: list[str] of groups' google accounts
    samples: list[str] of sample cds_ids for which you want to share data
    bamcols: list[str] list of column names where bams/bais are
    raise_error: whether or not to raise an error if we find blacklisted lines
    refsheet_url: the google spreadsheet where the samples are stored
    privacy_sheeturl: the google spreadsheet where the blacklist is stored

  Returns:
  --------
    a list of the gs paths we have given access to
  """
  sheets = Sheets.from_files('~/.client_secret.json', '~/.storage.json')
  print("You need to have gsheet installed and you '~/.client_secret.json', '~/.storage.json' set up")
  privacy = sheets.get(privacy_sheeturl).sheets[6].to_frame()
  refdata = sheets.get(refsheet_url).sheets[0].to_frame(index_col=0)
  blacklist = [i for i in privacy['blacklist'].values.tolist() if i is not np.nan]
  blacklisted = set(blacklist) & set(samples)
  print("we have " + str(len(blacklist)) + ' blacklisted files')
  if len(blacklisted):
    print("these lines are blacklisted " + blacklisted)
    if raise_error:
      raise ValueError("blacklistedlines")
  if type(users) is str:
    users = [users]

  togiveaccess = np.ravel(refdata[bamcols].loc[samples].values)
  usrs = ""
  for group in groups:
    usrs += " -g " + group + ":R"
  for user in users:
    usrs += " -u " + user + ":R"
  cmd_prefix = "gsutil -m acl ch " + usrs
  cmd = cmd_prefix
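  # append filenames to a single gsutil command, flushing whenever it would exceed arg_max_length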
  for n, filename in enumerate(togiveaccess):
    oldcmd = cmd
    cmd += ' ' + filename
    if (len(cmd) > arg_max_length) | (n==len(togiveaccess)-1):
        if n < len(togiveaccess)-1:
            cmd = oldcmd
        print('granting access to {:d} files'.format(n))
        with open('/tmp/grantaccess{:d}.sh'.format(n), 'w') as f:
          f.write(cmd)
        code = os.system(cmd)
        cmd = cmd_prefix + ' ' + filename
        if code == signal.SIGINT:
          print('Awakened')
          return

  print('the files are stored here:\n\n' + refsheet_url)
  print('\n\njust install and use gsutil to copy them')
  print('https://cloud.google.com/storage/docs/gsutil_install')
  print('https://cloud.google.com/storage/docs/gsutil/commands/cp')
  return togiveaccess
Example #24
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError:
        logging.info('Error creating directory: ' + directory)


# Create CSV file folder locally if does not exist
logging.info("Creating CSV folder " + downloadFolder + ".")
createFolder(downloadFolder)

# Authenticate Connection
try:
    # See: https://pypi.org/project/gsheets/
    # Initialize sheet object with authentication
    sheets = Sheets.from_files('~/client_secrets.json', '~/storage.json')
    # Uncomment and run for initial authentication
    # sheets  #doctest: +ELLIPSIS
except OSError:
    logging.info('Error: Could not authenticate.  Check json files in path.')

# Iterate through the sheet list and download to local CSV files
for workbook in workbookList:
    dashboardID = workbook[0]
    # Create book object for Dashboard Sheet
    dashb = sheets[dashboardID]
    # Get workbook name
    title = dashb.title
    # Download all sheets in list 'sheetsToDownload'
    sheetsToDownload = workbook[1]
    for sheetToDownload in sheetsToDownload:
Example #25
 def __init__(self, credentials: str, sheet_id: str):
     self.sheet_id = sheet_id
     self.sheets = Sheets.from_files(credentials)
     self.sheet = self.sheets.get(self.sheet_id)
     self.metadata = self.get_metadata()
Example #26
    def updateCSV(self):
        # Sheets object
        self.sheets = Sheets.from_files('client_secrets.json', 'storage.json')
        # GoogleSheets URL
        self.url = 'https://docs.google.com/spreadsheets/d/1qz_1Fy-2Sda76fe8bIsD4Ci0aO25cmUTMJVCHAqO7NI/edit#gid=1542267929'
        # Get Poker History Sheets
        s = self.sheets.get(self.url)
        # Update CSV files
        s.sheets[0].to_csv('Game_History.csv',
                           encoding='utf-8',
                           dialect='excel')
        s.sheets[1].to_csv('Player_Stats.csv',
                           encoding='utf-8',
                           dialect='excel')
        s.sheets[2].to_csv('Player_Info.csv',
                           encoding='utf-8',
                           dialect='excel')

        # Load Game History
        with open(self.gameHistory, encoding='utf-8') as sheet:
            reader = csv.reader(sheet, delimiter=',')
            # Parse Games into games structure
            row_index = 0
            gameNum = 0
            date = 'date'
            game = []
            for row in reader:
                if row_index == 0:
                    row_index += 1
                    continue
                if row:
                    if (row[0]):
                        date = row[0]
                    if (row[2] == 'Total'):
                        columns = [row[2]] + [
                            round(float(num), 2) for num in row[4:6]
                        ]
                    else:
                        if (row[7] == 'n'):
                            self.outstanding.append(
                                [date, row[2],
                                 round(float(row[6]), 2)])
                        columns = [row[2]] + [row[3]] + [
                            round(float(num), 2) for num in row[4:7]
                        ] + [row[7]]
                    game.append(columns)
                else:
                    self.games.append(game)
                    game = []
                    gameNum += 1
                row_index += 1
            # Last Game needs to be added
            self.games.append(game)
            self.numGames = gameNum
            print(self.outstanding)

        # Load Player Stats
        with open(self.playerStats, encoding='utf-8') as sheet:
            reader = csv.reader(sheet, delimiter=',')
            row_index = 0
            for row in reader:
                if row_index == 0:
                    row_index += 1
                    continue
                columns = [row[0], row[1]
                           ] + [round(float(num), 2) for num in row[2:7]]
                self.rank.append(columns)
                self.totaloutstanding.append([row[1], round(float(row[5]), 2)])
Example #27
 def __init__(self, credentials) -> None:
     self.credentials = credentials
     self.sheets = Sheets.from_files(credentials)
Example #28
                  key='035f240c26134eae472e',
                  secret='b0f4c7015ee1638bcbcf',
                  cluster='ap1',
                  ssl=True)
]

config = {
    "apiKey": "AIzaSyAFQWHKQcfdZ0GmxwFzohjPjRm6vwcbREM",
    "authDomain": "mael-c2ed5.firebaseapp.com",
    "databaseURL": "https://mael-c2ed5.firebaseio.com",
    "storageBucket": "mael-c2ed5.appspot.com"
}

firebase = pyrebase.initialize_app(config)
database = firebase.database()
sheets = Sheets.from_files('credentials.json')
url = 'https://docs.google.com/spreadsheets/d/1v_qSadYXZzS0TFuQ-vbmQKR95kKzFxiAvt-DQLwXeX8'


def error():
    print("ERROR")
    try:
        database.child("Error").set(1)
    except Exception:
        print("DATABASE ERROR (CHECK INTERNET)")


def get_client():
    Id = database.child('Id').get().val()
    Terminate = database.child('Terminate').get().val()
    if Terminate == 1:
Example #29
#Mail Package
import smtplib
from email.message import EmailMessage

#google sheet package
from gsheets import Sheets

#Authorizing the api
sheets = Sheets.from_files('FD4GS.json','FD4GS_cache.json')



#Fetching information from owners database
vi1 = sheets.get('1URQBjLmkRkPxI1RGQFpp_ZxUa1xMcoMth3qmWH9DK2Y') # Vehicle Information

vi1_form1_ws = vi1.sheets[0]

entries1 = vi1_form1_ws.values()[1:]

entries1 = [(i[2],i[6]) for i in entries1]



#Fetching information from search database
vi2 = sheets.get('1i2_N7yqcJQQ7cpBJqR8KnxpUOPh3TgNvY8oC1mCSv0U') # Search Database

vi2_form1_ws = vi2.sheets[0]

entries2 = vi2_form1_ws.values()[1:]

entries2 = [(j[1],j[2]) for j in entries2]
Example #30
spread.df_to_sheet(df,
                   index=False,
                   sheet='New Test Sheet',
                   start='A2',
                   replace=True)
spread.update_cells((1, 1), (1, 2), ['Created by:', spread.email])

print(spread)
# <gspread_pandas.client.Spread - User: '******', Spread: 'Example Spreadsheet', Sheet: 'New Test Sheet'>

#%%
#this works
from gsheets import Sheets

sheets = Sheets.from_files('~/.config/gspread_pandas/google_secret.json',
                           '~/.config/gspread_pandas/storage.json')
sheets  #doctest: +ELLIPSIS

url = 'https://docs.google.com/spreadsheets/d/1s-FFmQQdOJ1DdJsJneTWanF096REzaXRxD7dpKAZr_s/edit#gid=0'
s = sheets.get(url)
s
data = s.find('Sheet1').to_frame(index_col='businessid')

data.head()

#%%
# This also works
import gspread
from oauth2client.service_account import ServiceAccountCredentials

scope = ['https://spreadsheets.google.com/feeds']