コード例 #1
0
def to_sheet(message):
    """Read the kiem_kho CSV for the requested date and push it to Google Sheets."""
    arg = extract_args(message.text)
    csv_path = f"./assets/csv/kiem_kho_{arg}.csv"
    try:
        frame = pd.read_csv(csv_path, names=["Date", "User", "OrderCodes"])
        print(frame)
        # Tell the user the upload is in progress.
        bot.send_message(message.chat.id, "Đang xử lý...")
        # Replace the worksheet contents with the fresh CSV data.
        df2g.upload(
            frame,
            SPREADSHEET_ID,
            INVENTORY_NAME,
            credentials=credentials,
            row_names=False,
            col_names=True,
            clean=True,
        )
        bot.send_message(message.chat.id, "Đã push lên google sheets.")
    except FileNotFoundError:
        # No CSV exists for that date; report it instead of crashing.
        bot.send_message(
            message.chat.id,
            f"File ngày {arg} không tồn tại trên hệ thống. Vui lòng thử lại",
        )
コード例 #2
0
ファイル: routes.py プロジェクト: johngcrowley/sukho_website
def export(location):
    """Export tip records for *location* to its Google Sheets worksheet.

    Joins tips, employees and crews from the database, adds a date column,
    filters by location and uploads the prepared frame. Unknown locations
    redirect back to the index page.
    """
    dataframe = pd.read_sql(
        '''
        SELECT tips.employee_id, tips.location, tips.time, tips.crew_id, employee.position, employee.name, 
        tips.tips, "Crews".created_at FROM "Crews"
        JOIN tips ON tips.crew_id="Crews".id JOIN employee ON employee.id=tips.employee_id''',
        db.session.bind)

    # Derive a plain date column from the crew creation timestamp.
    dataframe['date'] = pd.to_datetime(dataframe['created_at']).dt.date

    # Each supported location maps to its worksheet index in wks_name.
    # (Replaces two copy-pasted branches that differed only in these values.)
    worksheet_index = {'Marigny': 0, 'Uptown': 1}
    if location not in worksheet_index:
        return redirect('/')

    subset = dataframe.loc[dataframe['location'] == location]
    prepared = tip_out_prep(df_prep(subset))
    d2g.upload(prepared,
               spreadsheet_key,
               wks_name[worksheet_index[location]],
               credentials=creds,
               row_names=True)
    msg = 'Success! \n\n Check out your Google Sheet for updated records'
    return render_template('success.html', msg=msg)
def _write_sheets_table(df, freq, config, drive_config):
    """Upload *df* to the worksheet named after *freq* in the configured spreadsheet."""
    credentials, gc = _get_credentials()
    d2g.upload(df,
               drive_config["spreadsheet_key"],
               freq,
               row_names=False,
               credentials=credentials)
コード例 #4
0
ファイル: exporters.py プロジェクト: DBeZ/BranchReport
def export_to_google_sheets(user_login_dict, dataframe, tab_name):
    """Upload *dataframe* to the given spreadsheet tab, falling back to CSV on failure.

    Credentials are read from a JSON keyfile inside the user's certificates
    folder (located one level above the current working directory). The
    caller's working directory is restored in all cases.
    """
    spreadsheet_id = user_login_dict["spreadsheet_id"]
    wks_name = tab_name
    # Scope restricted to the Sheets API only.
    scope = ['https://www.googleapis.com/auth/spreadsheets']
    working_folder = os.getcwd()
    try:
        # The keyfile lives in <parent>/<certificates_folder>.
        os.chdir("..")
        os.chdir(user_login_dict["certificates_folder"])
        jsonFile = user_login_dict["api_login_json"]
        credentials = service_account.ServiceAccountCredentials.from_json_keyfile_name(
            jsonFile, scope)
        gc = gspread.authorize(credentials)
        os.chdir("..")
        d2g.upload(dataframe,
                   spreadsheet_id,
                   wks_name,
                   credentials=credentials,
                   row_names=True
                   )  # TODO change so int and not str are uploaded into sheet
        os.chdir(working_folder)
        print("Export to google sheets complete")
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C still interrupts.
        # Restore the cwd first so the fallback CSV lands where callers expect.
        os.chdir(working_folder)
        print("Export to google sheets failed")
        print("Report will be saved instead")
        export_to_csv(dataframe, user_login_dict["result_file_name"])
コード例 #5
0
def store_data_to_google_sheet(database_list, row_nums, sheet_name,
                               credentials, cursor):
    """Write column metadata for each table in *database_list* to a Google Sheet.

    For table i, the name/type/nullability/default/comment columns are
    uploaded starting at row row_nums[i] of worksheet *sheet_name*.
    """
    # Destination spreadsheet and the sheet columns that receive each field.
    gfile_id = '1-fNBj4V266e6D5HNGRNZ6Kfi02kVXfRsnlvEa7PNflQ'
    col_letters = ['E', 'H', 'K', 'G', 'N']

    # zip replaces the range(len(...)) index bookkeeping.
    for table, start_row in zip(database_list, row_nums):
        # NOTE: table names cannot be bound as SQL parameters, so the name is
        # interpolated directly; callers must pass trusted identifiers only.
        sql = """
        SHOW FULL columns FROM {}
        """.format(table)
        cursor.execute(sql)
        print('executed\n')

        df = pd.DataFrame(list(cursor.fetchall()))

        # SHOW FULL COLUMNS layout: 0=Field, 1=Type, 3=Null, 5=Default, 8=Comment.
        data_cols = [pd.DataFrame(df[i]) for i in (0, 1, 3, 5, 8)]

        for i, (letter, column) in enumerate(zip(col_letters, data_cols)):
            d2g.upload(df=column,
                       gfile=gfile_id,
                       start_cell=letter + start_row,
                       wks_name=sheet_name,
                       col_names=False,
                       row_names=False,
                       clean=False,
                       credentials=credentials,
                       df_size=False)
            print(i)

        print(table + " finished upload")
コード例 #6
0
def main():
    """Pull Mint transactions, shape them, and push them to Google Sheets."""
    # Log in to Mint and refresh account data.
    mint = create_mint_objs(MINT_USERNAME, MINT_PASSWORD)
    mint.get_accounts(True)

    # Fetch transactions as JSON (investments excluded, duplicates skipped).
    raw_transactions = mint.get_transactions_json(include_investment=False,
                                                  skip_duplicates=True)

    frame = pd.DataFrame(raw_transactions)
    # Strip currency symbols/commas, then coerce the amounts to floats.
    frame["amount"] = frame["amount"].apply(
        lambda amt: Decimal(sub(r'[^\d.]', '', amt)))
    frame["amount"] = frame["amount"].astype(float)

    print("shaping data")
    credit_card = get_credit_card(frame)
    venmo = get_venmo(frame)

    print("fetching credentials")
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        GSPREAD_KEY, GSPREAD_SCOPE)
    gc = gspread.authorize(creds)

    print("uploading to gsheets")
    d2g.upload(credit_card, SPREADSHEET_ID, "Credit Card",
               credentials=creds, row_names=True)
    d2g.upload(venmo, SPREADSHEET_ID, "Venmo",
               credentials=creds, row_names=True)
コード例 #7
0
def write_data_to_gsheets(df_to_write):
    '''
    Write dataframe to Google Sheets.

    Authorizes with a service-account keyfile, shares the spreadsheet with
    MY_EMAIL, and uploads *df_to_write* to a worksheet named after today's
    date (YYYY-MM-DD). Always returns None.
    '''
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        MY_JSON_KEYFILE_NAME, scope)

    gc = gspread.authorize(credentials)
    spreadsheet_key = MY_SPREADSHEET_KEY

    # Ensure the target spreadsheet is shared with the owner account.
    sh = gc.open(MY_SPREADSHEET_NAME)
    sh.share(MY_EMAIL, perm_type='user', role='writer')

    # One worksheet per day, named by ISO date.
    worksheet_name = str(datetime.today().strftime('%Y-%m-%d'))

    d2g.upload(df_to_write,
               spreadsheet_key,
               worksheet_name,
               credentials=credentials,
               row_names=True)

    # FIX: `return print(...)` returned None implicitly anyway; keep the
    # message but drop the misleading return-of-print idiom.
    print('Data successfully loaded!')
コード例 #8
0
def move_stuff_to_google_sheets(df=None,
                                spreadsheet_key='',
                                wks_name='',
                                text='',
                                row_names=False):
    """Authorize against Google and upload *df* to the given worksheet."""
    # Imports kept local so the module loads without these dependencies.
    import gspread
    from df2gspread import df2gspread as d2g
    from oauth2client.service_account import ServiceAccountCredentials

    drive_scopes = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    creds = ServiceAccountCredentials.from_json_keyfile_name(
        'googlecoviddata.json', drive_scopes)
    gc = gspread.authorize(creds)

    print("***LOADING TO GOOGLE ***")
    d2g.upload(df, spreadsheet_key, wks_name,
               credentials=creds, row_names=row_names)
    print("***LOADING TO GOOGLE - DONE ***")
    print("*** DONE WITH", str(text), " ***")
コード例 #9
0
def send_to_sheets(totals_deflection, spreadsheet_key, path_to_spred_key):
    """Write the totals_deflection mapping into Sheet1 of the target spreadsheet."""
    # Two-column frame: event name and its DELTA value.
    df_to_sheets = pd.DataFrame(list(totals_deflection.items()),
                                columns=['Event', 'DELTA'])

    # Authorize with the service-account keyfile found under path_to_spred_key.
    scope = ['https://spreadsheets.google.com/feeds']
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        path_to_spred_key + 'creds_gsheets.json', scope)
    gc = gspread.authorize(credentials)

    # Write starting at A1 of Sheet1 without clearing existing content.
    d2g.upload(df_to_sheets,
               spreadsheet_key,
               'Sheet1',
               credentials=credentials,
               col_names=True,
               row_names=False,
               start_cell='A1',
               clean=False)
    print('The sheet is updated successfully')
コード例 #10
0
def updateRanks(credentials: 'credentials', season: int, week: int) -> None:
    """Update the Website Google Sheets for all Ranks. This will update each character,
    the MidTiers, and Other"""
    spreadsheet_key = '1gwi2PAyjBA2peLLexP2NGO2mfh_J-AbH8uxkAf3xt_s'
    wks_name = 'Ranks'
    websiteSheets = [
        'Fox', 'Falco', 'Marth', 'Sheik', 'Falcon', 'Puff', 'Peach', 'ICs',
        'MidTiers', 'Other'
    ]
    print("Ranks")

    # Rank legend CSV produced by the plotting pipeline for this week.
    characters = f'Data/Season{season}/PlotsWebsite/S{season}W{week}RankLegend.csv'
    characters = pd.read_csv(characters, encoding='ISO-8859-1')

    # One worksheet per character; iterate the names directly instead of
    # indexing with range(len(...)).
    for character in websiteSheets:
        df = characters[characters['Character'] == character]
        df = df[['SmashTag', 'Rank']]
        df['Rank'] = df['Rank'].astype(int)
        d2g.upload(df,
                   spreadsheet_key,
                   character,
                   credentials=credentials,
                   row_names=False)
        # Throttle to stay under the Sheets API rate limit.
        time.sleep(10)
        print(character)
コード例 #11
0
def main():
    """Sync Facebook Ads insight data into two Google Sheets worksheets."""
    # Initialize API access.
    FacebookAdsApi.set_default_api(initialize_facebook())

    # Authorize credentials with Google Drive.
    gc = initialize_drive()

    # Get the ad account connected to the user.
    # [3] may not be your account; find the right one, try [0] first.
    my_account = AdAccountUser(fbid='me').get_ad_accounts()[3]

    # Download existing frames plus the dates to resume from.
    existing_totals, start_date_totals = download_df(WORKSHEETS[0])
    existing_details, start_date_details = download_df(WORKSHEETS[1])

    # Extend both datasets week by week.
    all_data_totals = loop_adding_weeks_totals(
        my_account, existing_totals, start_date_totals, INSIGHT_FIELDS)
    all_data_details = loop_adding_weeks_details(
        my_account, existing_details, start_date_details, INSIGHT_FIELDS)

    # Upload the transformed data back to Google Sheets.
    d2g.upload(df=all_data_totals, gfile=SPREADSHEET, wks_name=WORKSHEETS[0])
    d2g.upload(df=all_data_details, gfile=SPREADSHEET, wks_name=WORKSHEETS[1])
コード例 #12
0
def update_df_in_gdrive(collection, worksheet_name="קטלוג סופי", copy=False):
    """Upload the collection's catalog (or missing-records frame) to Google Sheets.

    When uploading the final catalog, any leftover "missing system numbers"
    worksheet is removed; with copy=True a timestamped copy of the sheet is
    created and becomes the collection's new spreadsheet id.
    """
    credentials = get_google_drive_credentials()
    if worksheet_name == "קטלוג סופי":
        d2g.upload(
            collection.full_catalog.applymap(str),
            collection.google_sheet_file_id,
            worksheet_name,
            credentials=credentials,
            row_names=True,
        )
        gc = gspread.authorize(credentials)
        sh = gc.open_by_key(collection.google_sheet_file_id)
        # BUG FIX: sh.worksheets() returns Worksheet objects, so comparing a
        # title string against that list never matched, and del_worksheet
        # requires the worksheet object, not its title. Match by title.
        for ws in sh.worksheets():
            if ws.title == "מספרי מערכת חסרים":
                sh.del_worksheet(ws)
        if copy:
            client = connect_to_google_drive()
            spreadsheet = client.copy(
                collection.google_sheet_file_id,
                collection.collection_id + "_Final_to_Alma_" +
                collection.dt_now,
            )
            collection.google_sheet_file_id = spreadsheet.id
            collection.create_catalog_metadata_file()
    else:
        d2g.upload(
            collection.missing_records.applymap(str),
            collection.google_sheet_file_id,
            worksheet_name,
            credentials=credentials,
            row_names=True,
        )
コード例 #13
0
def updatePoints(credentials: 'credentials', season: int, week: int) -> None:
    """Update the Website Google Sheets for all Bills. This will update each character,
    the MidTiers, and Other"""
    spreadsheet_key = '12Q-krFLMSpc-AuLH2KGjTz9SoAwC1ob1spxmVW7XSfY'
    wks_name = 'Bills'
    websiteSheets = [
        'Fox', 'Falco', 'Marth', 'Sheik', 'Falcon', 'Puff', 'Peach', 'ICs',
        'MidTiers', 'Other'
    ]
    print("Points")

    # Points legend CSV produced by the plotting pipeline for this week.
    characters = f'Data/Season{season}/PlotsWebsite/S{season}W{week}PointsLegend.csv'
    characters = pd.read_csv(characters, encoding='ISO-8859-1')

    # One worksheet per character; iterate the names directly instead of
    # indexing with range(len(...)).
    for character in websiteSheets:
        df = characters[characters['Character'] == character]
        df = df.rename(columns={'Points': 'Bills'})
        df['Bills'] = df['Bills'].astype(int)
        df = df[['SmashTag', 'Bills']]
        d2g.upload(df,
                   spreadsheet_key,
                   character,
                   credentials=credentials,
                   row_names=False)
        # Throttle to stay under the Sheets API rate limit.
        time.sleep(10)
        print(character)
コード例 #14
0
ファイル: typeform.py プロジェクト: kejbaly2/devconfcz
def save(obj, csv, upload, html, path):
    """Persist proposals as CSV, upload new rows to Google Sheets, or dump HTML.

    Defaults to CSV output when no destination flag is set.
    """
    proposals = obj['proposals']
    if not (csv or upload or html):
        csv = True

    if csv:
        path = path or './'
        path = os.path.join(path, "devconfcz_proposals.csv")
        # FIX: context manager so the file handle is always closed.
        with open(path, 'w') as f:
            proposals.to_csv(f)

    if upload:
        path = path or 'devconfcz_proposals'

        wks = 'Submissions MASTER'  # "update" the existing sheet

        # grab only the items we don't already have so we
        # can APPEND them to the sheet rather than rewritting
        # the whole sheet
        start_cell, col_names, proposals = _diff_submissions(path, wks,
                                                             proposals)
        if not proposals.empty:
            d2g.upload(proposals, path, wks, start_cell=start_cell,
                       clean=False, col_names=col_names)
        else:
            print("No new proposals to upload... QUITTING!")

    if html:
        print(proposals.style.render())
コード例 #15
0
def save(obj, csv, upload, html, path):
    """Persist proposals as CSV, upload new rows to Google Sheets, or dump HTML.

    Defaults to CSV output when no destination flag is set.
    """
    proposals = obj['proposals']
    if not (csv or upload or html):
        csv = True

    if csv:
        path = path or './'
        path = os.path.join(path, "devconfcz_proposals.csv")
        # FIX: context manager so the file handle is always closed.
        with open(path, 'w') as f:
            proposals.to_csv(f)

    if upload:
        path = path or 'devconfcz_proposals'

        wks = 'Submissions MASTER'  # "update" the existing sheet

        # grab only the items we don't already have so we
        # can APPEND them to the sheet rather than rewritting
        # the whole sheet
        start_cell, col_names, proposals = _diff_submissions(
            path, wks, proposals)
        if not proposals.empty:
            d2g.upload(proposals,
                       path,
                       wks,
                       start_cell=start_cell,
                       clean=False,
                       col_names=col_names)
        else:
            print("No new proposals to upload... QUITTING!")

    if html:
        print(proposals.style.render())
コード例 #16
0
def timelinetosheets(dictionary, sheetid, worksheet):
    """Upload a timeline dict (as a transposed DataFrame) to a worksheet.

    Creates *worksheet* if it does not already exist, then labels cell A1
    as 'Date'.
    """
    jsonfile = './configuration/' + config['JSON file from Google']
    import gspread
    from df2gspread import df2gspread as d2g
    import oauth2client
    from oauth2client.service_account import ServiceAccountCredentials
    scope = ['https://spreadsheets.google.com/feeds',
             'https://www.googleapis.com/auth/drive']
    gcredentials = ServiceAccountCredentials.from_json_keyfile_name(jsonfile, scope)

    client = gspread.authorize(gcredentials)
    wks = client.open_by_key(sheetid)

    # Collect existing worksheet titles; the scan ends when get_worksheet
    # runs past the last sheet.
    sheetnames = []
    index = 0
    try:
        while wks.get_worksheet(index) is not None:  # was `!= None`
            sheetnames.append(wks.get_worksheet(index).title)
            index += 1
    except Exception:
        # Narrowed from a bare except; any API error simply stops the scan.
        pass
    if worksheet not in sheetnames:
        wks.add_worksheet(title=worksheet, rows="1000", cols="30")

    dataframe = pd.DataFrame(data=dictionary).T
    d2g.upload(dataframe, sheetid, worksheet, credentials=gcredentials, row_names=True)
    sheet = wks.worksheet(worksheet)
    sheet.update_acell('A1', 'Date')
コード例 #17
0
def exp_data(loc='gdrive'):
    """Export the global price/covariance frames.

    loc: 'excel' writes all frames to a local workbook;
         'gdrive' uploads each frame to its own worksheet of /Trading FXCM/PyData.
    """
    # FIX: the original first statement was the bare expression
    # `'excel', 'gdrive'` — a no-op tuple clearly intended as documentation;
    # it now lives in the docstring above.
    if loc == 'excel':
        writer = ExcelWriter(
            "/home/rem/Documents/FXCM Trading (Dropbox)/PyData.xlsx")
        pr_instr.to_excel(writer, 'Sheet1')
        pr_factors.to_excel(writer, 'Sheet2')
        ewma_corr_instr.to_excel(writer, 'Sheet3')
        ewma_cov_instr.to_excel(writer, 'Sheet4')
        vol.to_excel(writer, 'Sheet5')
        ewma_cov_fact.to_excel(writer, 'Sheet6')
        writer.save()
    if loc == 'gdrive':
        # Upload each frame to its own named worksheet.
        for frame, wks in ((pr_instr, 'pr_instr'),
                           (pr_factors, 'pr_factors'),
                           (ewma_corr_instr, 'corr_instr'),
                           (ewma_cov_instr, 'cov_instr'),
                           (ewma_cov_fact, 'cov_fact')):
            d2g.upload(frame, gfile='/Trading FXCM/PyData', wks_name=wks)
    return
コード例 #18
0
ファイル: test_df2gspread.py プロジェクト: rothnic/df2gspread
def test_worksheet(user_credentials_not_available):
    """Round-trip a small frame through upload/download, then clean up."""
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import string
    import random
    import pandas as pd

    from df2gspread import df2gspread as d2g
    from df2gspread import gspread2df as g2d
    from df2gspread.utils import get_credentials
    from df2gspread.gfiles import get_file_id
    from df2gspread.gfiles import delete_file

    df_upload = pd.DataFrame(
        {'0': ['1', '2', '3', '4']}, index=['1', '2', '3', '4'])

    # Random spreadsheet name so parallel runs don't collide.
    alphabet = string.ascii_uppercase + string.digits
    filepath = ''.join(random.choice(alphabet) for _ in range(10))

    # Upload to the default (first) worksheet and verify the round trip.
    d2g.upload(df_upload, filepath)
    df_download = g2d.download(filepath, col_names=True, row_names=True)
    assert all(df_upload == df_download)

    # Re-upload into an explicitly named worksheet of the same spreadsheet.
    d2g.upload(df_upload, filepath, wks_name=filepath)
    df_download = g2d.download(filepath, col_names=True, row_names=True)
    assert all(df_upload == df_download)

    # Remove the spreadsheet created on Drive.
    credentials = get_credentials()
    delete_file(credentials, get_file_id(credentials, filepath))
コード例 #19
0
def main():
    """Pull weekly Google Analytics metrics and append them to a Gsheet.

    Builds one column of data for the current ISO week (non-segmented
    metrics, then paid-segment metrics, then free-segment metrics), merges
    it into the existing 'ga_weekly_data' worksheet, and re-uploads the
    combined frame. Relies on module-level config: json_key_file_location,
    key_file_location, service_account_email, no_seg, seg, start_day,
    end_day, marketing_paid, marketing_free.
    """
    # Define the auth scopes to request.
    scope_ga = ['https://www.googleapis.com/auth/analytics.readonly']
    #Authorize credentials with Google Drive to export data to a Google spreadsheet
    scope_drive = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        json_key_file_location, scope_drive)
    gc = gspread.authorize(credentials)

    # Authenticate and construct service.
    service = get_service('analytics', 'v3', scope_ga, key_file_location,
                          service_account_email)
    profile = get_first_profile_id(service)

    # Modulize the classes
    ga_non_segmented = GA_non_segmented(service, profile)
    ga_segmented = GA_segmented(service, profile)
    # Create an empty pandas dataframe and fill it with this weeks data.
    # Row order matters: it must match the index built further below.
    data = pd.DataFrame()
    for metric in no_seg:
        data = data.append(ga_non_segmented.get_results(
            service, profile, start_day, end_day, metric).get('rows')[0],
                           ignore_index=True)
    # Segmented metrics: once against the paid-marketing segment ...
    for metric in seg:
        data = data.append(
            ga_segmented.get_results(service, profile, start_day, end_day,
                                     metric, marketing_paid).get('rows')[0],
            ignore_index=True)
    # ... and once against the free-marketing segment.
    for metric in seg:
        data = data.append(
            ga_segmented.get_results(service, profile, start_day, end_day,
                                     metric, marketing_free).get('rows')[0],
            ignore_index=True)
    # Single column named after the current ISO week number.
    data.columns = [dt.date.today().isocalendar()[1]]
    # Row labels: plain metric names, then <metric>_paid, then <metric>_free,
    # matching the append order above.
    seg_paid = []
    for word in seg:
        seg_paid.append(word + '_paid')
    seg_free = []
    for word in seg:
        seg_free.append(word + '_free')
    data = data.set_index([no_seg + seg_paid + seg_free])

    # Download the existing Gsheet data to a pandas dataframe
    spreadsheet = 'your_googlesheet_id'
    wks_name = 'ga_weekly_data'
    existing_data = g2d.download(gfile=spreadsheet,
                                 wks_name=wks_name,
                                 col_names=True,
                                 row_names=True)
    # Merge this week's column into the downloaded history and re-upload.
    ga_weekly_data = pd.DataFrame()
    ga_weekly_data = existing_data
    ga_weekly_data[data.columns] = data[data.columns]
    #Upload the whole pandas dataframe to the data dump spreadsheet on worksheet installs
    d2g.upload(ga_weekly_data, spreadsheet, wks_name)
コード例 #20
0
def upload_sheet(songs_df: pd.DataFrame, key):
	"""Upload *songs_df* to the 'Master' worksheet of spreadsheet *key*."""
	# Authorize with the service-account keyfile; d2g handles the rest.
	scope = ['https://spreadsheets.google.com/feeds']
	creds = ServiceAccountCredentials.from_json_keyfile_name('MarkovBot.json', scope)

	# FIX: dropped the unused `client.open_by_key(key).sheet1` fetch — it made
	# an API round-trip whose result was never used.
	wks_name = 'Master'
	d2g.upload(songs_df, key, wks_name, credentials=creds, row_names=True)
コード例 #21
0
def update_sheet(sellers_list):
    """Record per-quarter, per-product seller metrics in a Google Sheet.

    Returns the assembled history DataFrame.
    """
    columns = [
        'Seller', 'QTR', 'Product', 'Sales', 'Revenue', 'Expense', 'Profit',
        'Advertisement Strategy', 'Promotion Effectiveness', 'Num of Buyers',
        'Budget'
    ]
    rows = []

    for seller in sellers_list:
        # Per-seller history series, indexed by quarter then product.
        sales = seller.sales_history
        revenue = seller.revenue_history
        expense = seller.expense_history
        profit = seller.profit_history
        advert = seller.advert_history
        promo = seller.promo_history
        buyer = seller.buyer_history
        budget = seller.budget

        quarter_count = len(sales)
        products = seller.products_list

        for qtr in range(quarter_count):
            for prd in products:
                rows.append([
                    str(seller.name),
                    int(qtr + 1),
                    str(prd.name),
                    int(sales[qtr][prd]),
                    int(revenue[qtr][prd]),
                    int(expense[qtr][prd]),
                    int(profit[qtr][prd]),
                    str(advert[qtr][prd]),
                    round(promo[qtr][prd], 2),
                    int(buyer[qtr][prd]),
                    int(budget[qtr])
                ])

    # Destination spreadsheet and worksheet.
    spreadsheet_key = '1H_MLURMBXG1jYapARtWbh_jd6T83huYAb8tSw7kUhv4'
    wks_name = 'Master'
    df = pd.DataFrame(rows, columns=columns)
    # Push the history frame to the sheet.
    d2g.upload(df,
               spreadsheet_key,
               wks_name,
               credentials=credentials,
               row_names=True)

    return df
コード例 #22
0
def alldatatosheets(dictionary, sheetid, worksheet):
    """Flatten a card dict (with nested labels/members) and upload it.

    Creates *worksheet* if missing, joins one row per (card, label) and
    (card, member) pair onto the card table, and pushes the merged frame.
    """
    jsonfile = './configuration/' + config['JSON file from Google']
    import gspread
    from df2gspread import df2gspread as d2g
    import oauth2client
    from oauth2client.service_account import ServiceAccountCredentials
    scope = ['https://spreadsheets.google.com/feeds',
             'https://www.googleapis.com/auth/drive']
    gcredentials = ServiceAccountCredentials.from_json_keyfile_name(jsonfile, scope)

    client = gspread.authorize(gcredentials)
    wks = client.open_by_key(sheetid)

    # Collect existing worksheet titles; the scan ends when get_worksheet
    # runs past the last sheet.
    sheetnames = []
    index = 0
    try:
        while wks.get_worksheet(index) is not None:  # was `!= None`
            sheetnames.append(wks.get_worksheet(index).title)
            index += 1
    except Exception:
        # Narrowed from a bare except; any API error simply stops the scan.
        pass
    if worksheet not in sheetnames:
        wks.add_worksheet(title=worksheet, rows="1000", cols="30")

    # One (card, label) row per label and one (card, member) row per member.
    labelslist = []
    memberslist = []
    for card_id, card in dictionary.items():
        labels = card.get('labels')
        if labels:
            labelslist.extend((card_id, value) for value in labels.values())
        members = card.get('members')
        if members:
            memberslist.extend((card_id, value) for value in members.values())

    # Drop the nested labels dict so the card table stays flat
    # (pop replaces the try/del/bare-except dance).
    for card in dictionary.values():
        card.pop('labels', None)

    # NOTE(review): `kaarten` is a module-level name, not the `dictionary`
    # argument — this looks suspicious but is preserved; confirm intent.
    if labelslist:
        df_cards = pd.DataFrame(data=kaarten).T
        df_labels = pd.DataFrame(data=labelslist, columns=['cardid', 'label'])
        df_members = pd.DataFrame(data=memberslist, columns=['cardid', 'member'])
        merged = pd.merge(df_cards, df_labels, on='cardid', how='left')
        dataframe = pd.merge(merged, df_members, on='cardid', how='left')
    else:
        df_cards = pd.DataFrame(data=kaarten).T
        df_members = pd.DataFrame(data=memberslist, columns=['cardid', 'member'])
        dataframe = pd.merge(df_cards, df_members, on='cardid', how='left')

    d2g.upload(dataframe, sheetid, worksheet, credentials=gcredentials, row_names=True)
コード例 #23
0
def write(df, SPREADSHEET_KEY, WORKSHEET_NAME):
    """Authorize from credentials.json and upload *df* to the given worksheet."""
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        'credentials.json', scope)
    gc = gspread.authorize(credentials)
    d2g.upload(df, SPREADSHEET_KEY, WORKSHEET_NAME,
               credentials=credentials, row_names=True)
    print("Written")
コード例 #24
0
def send_to_gs(df, wks_name):
    """
    Send a pandas dataframe to the module-level Google Spreadsheet,
    writing the index as the first column.
    """
    d2g.upload(df, spreadsheet_key, wks_name,
               credentials=credentials, row_names=True)
コード例 #25
0
def _write_sheets_table(df, config, drive_config):
    """Upload *df* to the spreadsheet/worksheet named in drive_config."""
    credentials, gc = _get_credentials()
    d2g.upload(df,
               drive_config['spreadsheet_key'],
               drive_config['worksheet'],
               row_names=False,
               credentials=credentials)
コード例 #26
0
ファイル: comecar.py プロジェクト: t-madeira-novus/GFIP
def start(month, year, app):
    """Drive the monthly GFIP workflow for every company in the control sheet.

    Reads company / Fap / Pró-labore data from the Google Sheet, then walks
    each row: runs phase 1 if pending, runs phase 2 once phase 1 is verified
    (skipped for month '13'), and collects fully-verified rows. The sheet is
    re-uploaded after each successful phase 2.

    NOTE(review): the `app` parameter and the collected `rows_to_delete`
    list are unused within this function — confirm whether deletion was
    meant to happen here.
    """
    # Load the dataframe
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        'gfip-294718-351dce3da4ae.json',
        scope)  # The credential json file here
    spreadsheet_key = "1SFa0STOpjhB5b8Eqi92E40yDBCH9EFwqG_Mgv5gYdq0"
    df = read_google_spreadsheet()
    companies = df['Empresas'].tolist()
    faps = df['Fap'].tolist()
    prolabores = df['Pró-labore'].tolist()
    path = "P:\\documentos\\OneDrive - Novus Contabilidade\\Doc Compartilhado\\Pessoal\\Relatórios Sefip"

    # Standardize the Pró-labore's format: anything other than "Sim" becomes "Não".
    for i in range(0, len(prolabores)):
        if prolabores[i] != "Sim":
            prolabores[i] = "Não"

    # Make sure the codes are int values (sometimes they are imported as float)
    for i in range(0, len(companies)):
        try:
            companies[i] = int(companies[i])
        except ValueError:
            pass

    dictionary = cria_dicionario(companies, faps, prolabores,
                                 path)  # {Código: [Nome, Fap, Pró-labore]}
    rows_to_delete = []
    for i in df.index:
        if df.at[i, 'Fase 1'] != 'Ok':
            print('Começando Fase 1', companies[i])
            phase_1(df, i, dictionary, companies[i], month, year)
        elif month != '13' and 'Verificado' in df.at[
                i, 'Verificado Fase 1'] and df.at[i, 'Fase 2'] != 'Ok':
            print('Começando Fase 2', companies[i])
            if phase_2(df, i, dictionary, companies[i], month, year):
                df.at[i, 'Fase 2'] = 'Ok'
                # Persist the updated sheet after a successful phase 2.
                d2g.upload(df,
                           spreadsheet_key,
                           credentials=credentials,
                           row_names=False
                           )  # row_names=False removes the index 'column'
        elif df.at[i, 'Verificado Fase 1'] == 'Verificado' and df.at[
                i, 'Verificado Fase 2'] == 'Verificado':
            rows_to_delete.append(i)
        elif df.at[i, 'Pró-labore'] == "Sim" and df.at[
                i, 'Verificado Fase 1'] == 'Verificado':
            rows_to_delete.append(i)
        else:
            print("Conferir linha {} empresa {}".format(i, companies[i]))
コード例 #27
0
  def writeSheet(self, data):
    """Upload *data* to the 'ratings' worksheet of the configured spreadsheet."""
    import gspread
    from df2gspread import df2gspread as d2g
    from oauth2client.service_account import ServiceAccountCredentials
    import pandas as pd

    scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
    creds = ServiceAccountCredentials.from_json_keyfile_name('cliente_secret.json', scope)
    client = gspread.authorize(creds)
    # FIX: the original line was `spreadsheet_key = <yor spreadsheet ID here>`,
    # a bare angle-bracket placeholder that is a syntax error. Keep it as a
    # string placeholder to be filled in by the user.
    spreadsheet_key = 'YOUR_SPREADSHEET_ID_HERE'
    wks_name = 'ratings'
    d2g.upload(data, spreadsheet_key, wks_name, credentials=creds, row_names=True, clean=True)
コード例 #28
0
def store_data_firstBuy(dataframe, creds, week_diff):
    """Write first-buy rows into the FirstBuy sheet at the week's row offset."""
    target_sheet = 'FirstBuy'
    # Anchor the write at column A, one row before the week difference.
    anchor = "A" + str(week_diff - 1)
    d2g.upload(df=dataframe,
               gfile='1skrKAOLZLPoul59kgjbY6jQwE60VxVCOcIwG2fZJCXo',
               start_cell=anchor,
               wks_name=target_sheet,
               col_names=False,
               row_names=False,
               clean=False,
               credentials=creds,
               df_size=False)
    print(target_sheet + " finished!")
コード例 #29
0
def load2GSheet(df, sheetName):
    """Authorize via credentials.json and upload *df* to the named worksheet."""
    drive_scopes = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]
    credentials = SAC.from_json_keyfile_name("credentials.json", drive_scopes)

    # Fixed destination spreadsheet.
    d2g.upload(df=df,
               gfile='1HqhkjYDrTR8jgLIZ0GMJzYqGk4tRNd6hTWVlzfySccE',
               wks_name=sheetName,
               credentials=credentials,
               row_names=True)
コード例 #30
0
def test_worksheet_invalid_name(user_credentials_not_available):
    """Downloading a non-existent worksheet must raise RuntimeError."""
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import pandas as pd

    from df2gspread import gspread2df as g2d
    from df2gspread import df2gspread as d2g

    filepath = '/df2gspread_tests/invalid_wks_name'
    # Create the spreadsheet with a single-cell frame.
    d2g.upload(pd.DataFrame(['test']), filepath)

    # Requesting a worksheet that was never created should fail loudly.
    with pytest.raises(RuntimeError):
        g2d.download(gfile=filepath, wks_name='invalid_wks_name')
コード例 #31
0
def test_worksheet_invalid_name(user_credentials_not_available):
    """A download aimed at a missing worksheet name raises RuntimeError."""
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import pandas as pd

    from df2gspread import gspread2df as g2d
    from df2gspread import df2gspread as d2g

    filepath = '/df2gspread_tests/invalid_wks_name'
    frame = pd.DataFrame(['test'])
    d2g.upload(frame, filepath)

    with pytest.raises(RuntimeError):
        g2d.download(gfile=filepath, wks_name='invalid_wks_name')
コード例 #32
0
def write_cleaned_data_to_worksheet(df):
    '''
    Write clean data to a new Google Sheets (cleansed data layer)
    '''
    # Share the cleaned-data spreadsheet with the owner account.
    sh = gc.open('job-seeker-maringa-cleaned-data')
    sh.share(MY_EMAIL_ADDRESS, perm_type='user', role='writer')

    # One worksheet per day: YYYY-MM-DD_cleaned.
    worksheet = str(datetime.today().strftime('%Y-%m-%d') + '_cleaned')

    d2g.upload(df,
               CLEAN_DATA_SPREADSHEET_KEY,
               worksheet,
               credentials=credentials,
               row_names=False)
コード例 #33
0
def store_data_MAU(dataframe, creds, sheet, new_month):
    """Append *dataframe* below the existing records of the 'MAU' worksheet.

    When *new_month* is truthy the data starts one row further down,
    otherwise it overwrites from the current last-record row.
    """
    sheet_name = 'MAU'
    # Anchor row derived from how many records the worksheet already holds.
    record_count = len(sheet.get_all_records())
    offset = 1 if new_month else 0
    start_row = str(record_count + offset)
    d2g.upload(df=dataframe,
               gfile='1skrKAOLZLPoul59kgjbY6jQwE60VxVCOcIwG2fZJCXo',
               start_cell="A" + start_row,
               wks_name=sheet_name,
               col_names=False,
               row_names=False,
               clean=False,
               credentials=creds,
               df_size=False)
    print(sheet_name + " finished!")
コード例 #34
0
def test_big_worksheet(user_credentials_not_available):
    """Frames exceeding gspread's default grid must round-trip unchanged.

    Uploads a 1001-row frame (over the default 1000 rows) and a 101-column
    frame (over the default 100 columns) and checks the downloaded copies
    match, then removes the temporary spreadsheet from Drive.
    """
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import string
    import random
    import numpy as np
    import pandas as pd
    from pandas.util.testing import assert_frame_equal

    from df2gspread import df2gspread as d2g
    from df2gspread import gspread2df as g2d
    from df2gspread.utils import get_credentials
    from df2gspread.gfiles import get_file_id
    from df2gspread.gfiles import delete_file

    # Random suffix so concurrent test runs do not collide on the same file.
    filepath = '/df2gspread_tests/' + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(10))

    # Indexes count out of default 1000
    df_upload = pd.DataFrame(index=range(1001),
                             columns=range(2))
    df_upload = df_upload.fillna('0')
    d2g.upload(df_upload, filepath, row_names=False, col_names=False)
    df_download = g2d.download(filepath)
    # df_download.columns = df_download.columns.astype(np.int64)

    assert_frame_equal(df_upload, df_download)

    # Columns count out of default 100
    df_upload = pd.DataFrame(index=range(1),
                             columns=range(101))
    df_upload = df_upload.fillna('0')
    d2g.upload(df_upload, filepath, row_names=False, col_names=False)
    df_download = g2d.download(filepath)
    # df_download.columns = df_download.columns.astype(np.int64)

    assert_frame_equal(df_upload, df_download)

    # Clear created file from drive
    credentials = get_credentials()
    file_id = get_file_id(credentials, filepath)
    delete_file(credentials, file_id)
コード例 #35
0
ファイル: test_rafflepy.py プロジェクト: kejbaly2/rafflepy
def test_gspread_load():
    """End-to-end check: upload a frame, then draw the raffle pool back down.

    Uploads a small DataFrame to a randomly named spreadsheet, loads it via
    rafflepy's gspread input (default column and an explicit column), and
    deletes the temporary file afterwards.
    """
    import random
    import string
    # BUGFIX: catch ImportError only. The original bare `except:` swallowed
    # every exception type (including SystemExit/KeyboardInterrupt) and
    # reported all of them as a missing-dependency failure.
    try:
        import pandas as pd
        from df2gspread import df2gspread as d2g
        from df2gspread import gspread2df as g2d
        from df2gspread.utils import get_credentials
        from df2gspread.gfiles import get_file_id
        from df2gspread.gfiles import delete_file
    except ImportError:
        assert 0

    from rafflepy import raffle

    df_upload = pd.DataFrame({'color': ['b', 'w', 'p', 'm'],
                              'Username': ['John', 'Nicolos', 'Katy', 'Mike'],
                              'Other': [1, 2, 3, 4]})

    # Random name so concurrent runs do not clash on the same spreadsheet.
    filepath = ''.join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(10))

    # Uploading df to test-spreadsheet
    d2g.upload(df_upload, filepath)
    credentials = get_credentials()
    file_id = get_file_id(credentials, filepath)

    # Downloading via file_id
    uri = file_id
    pool = raffle.input_gload(uri)

    assert all(pool == df_upload['Username'].values)

    # Downloading certain column
    column = 'color'
    pool = raffle.input_gload(uri, column)

    assert all(pool == df_upload['color'].values)

    # Clear created file from drive
    delete_file(credentials, file_id)
コード例 #36
0
ファイル: test_df2gspread.py プロジェクト: rothnic/df2gspread
def test_spreadsheet(user_credentials_not_available):
    """Create a spreadsheet, update it by path and by file id, then delete it."""
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import string
    import random
    import pandas as pd

    from df2gspread import df2gspread as d2g
    from df2gspread.utils import get_credentials
    from df2gspread.gfiles import get_file_id
    from df2gspread.gfiles import delete_file

    df = pd.DataFrame([1, 2, 3, 4])
    # Random name so concurrent test runs do not collide on the same file.
    filepath = ''.join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
    credentials = get_credentials()

    # creation of new spreadsheet
    d2g.upload(df, filepath)

    # updating existed spreadsheet by filepath
    df = pd.DataFrame([1, 2, 3, 4, 5])
    d2g.upload(df, filepath)

    # updating existed spreadsheet by file_id
    file_id = get_file_id(credentials, filepath)
    d2g.upload(df, file_id)

    # Clear created file from drive.
    # FIX: reuse the credentials and file_id resolved above instead of the
    # original's redundant second get_credentials()/get_file_id() round-trip
    # (same path, same file, identical result).
    delete_file(credentials, file_id)
コード例 #37
0
def test_gspread2df_start_cell(user_credentials_not_available):
    """Downloading from a start_cell must crop the sheet or fail out of range.

    Uploads a 4x4 frame, then verifies that (a) a start cell beyond the data
    raises RuntimeError and (b) valid start cells return the expected
    lower-right sub-frame.
    """
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import string
    import random
    import numpy as np
    import pandas as pd
    from numpy.testing import assert_array_equal
    from pandas.util.testing import assert_frame_equal

    from df2gspread import df2gspread as d2g
    from df2gspread import gspread2df as g2d
    from df2gspread.utils import get_credentials
    from df2gspread.gfiles import get_file_id
    from df2gspread.gfiles import delete_file

    # Random suffix so concurrent runs do not collide on the same file.
    filepath = '/df2gspread_tests/' + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(10))

    df_upload = pd.DataFrame(
        {0: ['1', '2', 'x', '4'],
        1: ['2', '2', 'y', '4'],
        2: ['3', '2', 'w', '4'],
        3: ['4', '2', 'z', '4']},
        index=[0, 1, 2, 3])

    # Start cell out of the table size
    d2g.upload(df_upload, filepath, row_names=False, col_names=False)
    with pytest.raises(RuntimeError):
        df_download = g2d.download(filepath, start_cell='A5')

    with pytest.raises(RuntimeError):
        df_download = g2d.download(filepath, start_cell='E1')

    # Should be fixed in gspread
    # with pytest.raises(RuntimeError):
    #     df_download = g2d.download(filepath, start_cell='A0')

    # start_cell = 'A3'
    # Expect rows 3..4 of the sheet, i.e. iloc[2:, :] of the uploaded frame.
    d2g.upload(df_upload, filepath, row_names=False, col_names=False)
    df_download = g2d.download(filepath, start_cell='A3')
    assert_array_equal(df_upload.iloc[2:,:], df_download)

    # start_cell = 'B3'
    # Expect the sub-frame starting at row 3, column B: iloc[2:, 1:].
    d2g.upload(df_upload, filepath, row_names=False, col_names=False)
    df_download = g2d.download(filepath, start_cell='B3')
    assert_array_equal(df_upload.iloc[2:,1:], df_download)

    # Clear created file from drive
    credentials = get_credentials()
    file_id = get_file_id(credentials, filepath)
    delete_file(credentials, file_id)
コード例 #38
0
def test_df2gspread_start_cell(user_credentials_not_available):
    """Uploading at various start_cell anchors must pad the sheet as expected.

    Each section uploads the same one-column frame anchored at a different
    start cell, rebuilds the expected padded frame (empty leading rows and/or
    columns), and compares it with a full-sheet download.
    """
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import string
    import random
    import numpy as np
    import pandas as pd
    from pandas.util.testing import assert_frame_equal

    from df2gspread import df2gspread as d2g
    from df2gspread import gspread2df as g2d
    from df2gspread.utils import get_credentials
    from df2gspread.gfiles import get_file_id
    from df2gspread.gfiles import delete_file

    # Random suffix so concurrent runs do not collide on the same file.
    filepath = '/df2gspread_tests/' + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(10))

    df_upload_0 = pd.DataFrame(
        {0: ['1', '2', 'x', '4']},
        index=[0, 1, 2, 3])

    # start_cell='A1': no padding expected.
    d2g.upload(df_upload_0, filepath, row_names=False, col_names=False, start_cell='A1')
    df_download = g2d.download(filepath)
    df_upload = df_upload_0.copy()
    assert_frame_equal(df_upload, df_download)

    # start_cell='A2': one empty leading row.
    d2g.upload(df_upload_0, filepath, row_names=False, col_names=False, start_cell='A2')
    df_download = g2d.download(filepath)
    df_upload = df_upload_0.copy()
    new_rows = 1
    new_rows_array = np.chararray((new_rows, len(df_upload.columns)))
    new_rows_array[:] = ''
    df_new_rows = pd.DataFrame(data = new_rows_array)
    df_upload = df_new_rows.append(df_upload, ignore_index=True)
    assert_frame_equal(df_upload, df_download)

    # start_cell='B1': one empty leading column.
    d2g.upload(df_upload_0, filepath, row_names=False, col_names=False, start_cell='B1')
    df_download = g2d.download(filepath)
    # BUGFIX: build the expectation on a copy. The original aliased
    # df_upload_0 here, so the in-place insert() below mutated the source
    # frame and every later section uploaded/compared the corrupted frame.
    df_upload = df_upload_0.copy()
    df_upload.insert(0, '-1', '')
    df_upload.columns = range(0, len(df_upload.columns))
    assert_frame_equal(df_upload, df_download)

    # start_cell='AB10': 27 empty leading columns and 9 empty leading rows.
    d2g.upload(df_upload_0, filepath, row_names=False, col_names=False, start_cell='AB10')
    df_download = g2d.download(filepath)
    df_upload = df_upload_0.copy()
    new_cols = 27
    new_cols_array = np.chararray((len(df_upload), new_cols))
    new_cols_array[:] = ''
    df_new_cols = pd.DataFrame(data = new_cols_array)
    df_upload = pd.concat([df_new_cols, df_upload], axis=1)
    df_upload.columns = range(0, len(df_upload.columns))
    new_rows = 9
    new_rows_array = np.chararray((new_rows, len(df_upload.columns)))
    new_rows_array[:] = ''
    df_new_rows = pd.DataFrame(data = new_rows_array)
    df_upload = df_new_rows.append(df_upload, ignore_index=True)
    assert_frame_equal(df_upload, df_download)

    # Backward compatibility df2gspread => gspread2df
    d2g.upload(df_upload_0, filepath, row_names=False, col_names=False, start_cell='AB10')
    df_upload = df_upload_0.copy()
    df_download = g2d.download(filepath, start_cell='AB10')
    assert_frame_equal(df_upload, df_download)

    d2g.upload(df_upload_0, filepath, start_cell='AB10')
    df_upload = df_upload_0.copy()
    df_download = g2d.download(filepath, row_names=True, col_names=True, start_cell='AB10')
    df_download.index = df_download.index.astype(np.int64)
    df_download.columns = df_download.columns.astype(np.int64)
    assert_frame_equal(df_upload, df_download)

    # Clear created file from drive
    credentials = get_credentials()
    file_id = get_file_id(credentials, filepath)
    delete_file(credentials, file_id)
コード例 #39
0
def test_df2gspread_df_size(user_credentials_not_available):
    """With df_size=True the worksheet grid must shrink/grow to fit the frame.

    Exercises: sizing a new sheet down to a small frame, expanding for a
    larger one, shrinking back, the +1 row/column added by names, explicit
    new_sheet_dimensions, and the interaction of df_size with start_cell.
    Sections run in order against the same spreadsheet.
    """
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import string
    import random
    import numpy as np
    import pandas as pd
    import gspread
    from pandas.util.testing import assert_frame_equal, assert_equal

    from df2gspread import df2gspread as d2g
    from df2gspread import gspread2df as g2d
    from df2gspread.utils import get_credentials
    from df2gspread.gfiles import get_file_id, delete_file, get_worksheet

    # Random suffix so concurrent runs do not collide on the same file.
    filepath = '/df2gspread_tests/' + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(10))
    credentials = get_credentials()
    gc = gspread.authorize(credentials)
    gfile_id = get_file_id(credentials, filepath, write_access=True)

    df_upload_a = pd.DataFrame(
        {0: ['1', '2', 'x', '4']},
        index=[0, 1, 2, 3])

    # 1500x2 frame of stringified ints, larger than the default grid.
    df_upload_b = pd.DataFrame(data = np.array([np.arange(1500)]*2).T).applymap(str)

    #Uploading a small DF to new sheet to test for sizing down from default
    d2g.upload(df_upload_a, filepath, "test1", row_names=False, col_names=False, df_size=True)
    df_download = g2d.download(filepath, "test1")
    df_upload = df_upload_a
    wks = get_worksheet(gc, gfile_id, "test1")
    assert_equal(wks.row_count, len(df_upload))
    assert_equal(len(df_upload.columns), wks.col_count)
    assert_equal(len(df_download), len(df_upload))
    assert_frame_equal(df_upload, df_download)

    #Upload a large DF to existing, smaller sheet to test for proper expansion
    d2g.upload(df_upload_b, filepath, "test1", row_names=False, col_names=False, df_size=True)
    df_download = g2d.download(filepath, "test1")
    df_upload = df_upload_b
    wks = get_worksheet(gc, gfile_id, "test1")
    assert_equal(wks.row_count, len(df_upload))
    assert_equal(len(df_upload.columns), wks.col_count)
    assert_equal(len(df_download), len(df_upload))
    assert_frame_equal(df_upload, df_download)

    #Uploading a small DF to existing large sheet to test for sizing down from default
    d2g.upload(df_upload_a, filepath, "test1", row_names=False, col_names=False, df_size=True)
    df_download = g2d.download(filepath, "test1")
    df_upload = df_upload_a
    wks = get_worksheet(gc, gfile_id, "test1")
    assert_equal(wks.row_count, len(df_upload))
    assert_equal(len(df_upload.columns), wks.col_count)
    assert_equal(len(df_download), len(df_upload))
    assert_frame_equal(df_upload, df_download)

    #New sheet with col names, make sure 1 extra row and column
    d2g.upload(df_upload_a, filepath, "test2", row_names=True, col_names=True, df_size=True)
    df_download = g2d.download(filepath, "test2")
    df_upload = df_upload_a
    wks = get_worksheet(gc, gfile_id, "test2")
    assert_equal(wks.row_count, len(df_upload) + 1)
    assert_equal(len(df_upload.columns) + 1, wks.col_count)
    assert_equal(len(df_download), len(df_upload) + 1)

    #Upload to new sheet with specified dimensions
    d2g.upload(df_upload_a, filepath, "test3", row_names=False, col_names=False, new_sheet_dimensions=(100,10))
    df_download = g2d.download(filepath, "test3")
    df_upload = df_upload_a
    wks = get_worksheet(gc, gfile_id, "test3")
    assert_equal(wks.row_count, 100)
    assert_equal(10, wks.col_count)
    assert_frame_equal(df_upload, df_download)

    #Test df_size with start_cell
    # Expected frame: 27 empty leading columns (up to AB) and 9 empty leading
    # rows (down to row 10) prepended to the uploaded data.
    d2g.upload(df_upload_a, filepath, "test4", row_names=False, col_names=False, start_cell='AB10',
        df_size = True)
    df_download = g2d.download(filepath, "test4")
    df_upload = df_upload_a
    new_cols = 27
    new_cols_array = np.chararray((len(df_upload), new_cols))
    new_cols_array[:] = ''
    df_new_cols = pd.DataFrame(data = new_cols_array)
    df_upload = pd.concat([df_new_cols, df_upload], axis=1)
    df_upload.columns = range(0, len(df_upload.columns))
    new_rows = 9
    new_rows_array = np.chararray((new_rows, len(df_upload.columns)))
    new_rows_array[:] = ''
    df_new_rows = pd.DataFrame(data = new_rows_array)
    df_upload = df_new_rows.append(df_upload, ignore_index=True)
    wks = get_worksheet(gc, gfile_id, "test4")
    assert_equal(wks.row_count, len(df_upload))
    assert_equal(len(df_upload.columns), wks.col_count)
    assert_equal(len(df_download), len(df_upload))
    assert_frame_equal(df_upload, df_download)

    #Test df_size with start_cell and sheet dimensions which need to be expanded
    d2g.upload(df_upload_a, filepath, "test5", row_names=False, col_names=False, start_cell='AB10',
        df_size = True, new_sheet_dimensions = (10,27))
    df_download = g2d.download(filepath, "test5")
    df_upload = df_upload_a
    new_cols = 27
    new_cols_array = np.chararray((len(df_upload), new_cols))
    new_cols_array[:] = ''
    df_new_cols = pd.DataFrame(data = new_cols_array)
    df_upload = pd.concat([df_new_cols, df_upload], axis=1)
    df_upload.columns = range(0, len(df_upload.columns))
    new_rows = 9
    new_rows_array = np.chararray((new_rows, len(df_upload.columns)))
    new_rows_array[:] = ''
    df_new_rows = pd.DataFrame(data = new_rows_array)
    df_upload = df_new_rows.append(df_upload, ignore_index=True)
    wks = get_worksheet(gc, gfile_id, "test5")
    assert_equal(wks.row_count, len(df_upload))
    assert_equal(len(df_upload.columns), wks.col_count)
    assert_equal(len(df_download), len(df_upload))
    assert_frame_equal(df_upload, df_download)

    # Clear created file from drive
    delete_file(credentials, gfile_id)
コード例 #40
0
def test_worksheet(user_credentials_not_available):
    """Round-trip a frame through upload/download with every name combination.

    Covers the default worksheet, a named second worksheet, addressing by
    file id, and the four row_names/col_names combinations. Downloaded
    indexes/columns come back as strings and are cast to int64 before
    comparison where names were uploaded.
    """
    if user_credentials_not_available:
        pytest.xfail(reason='Credentials')

    import string
    import random
    import numpy as np
    import pandas as pd
    from pandas.util.testing import assert_frame_equal

    from df2gspread import df2gspread as d2g
    from df2gspread import gspread2df as g2d
    from df2gspread.utils import get_credentials
    from df2gspread.gfiles import get_file_id
    from df2gspread.gfiles import delete_file

    df_upload = pd.DataFrame(
        {0: ['1', '2', 'x', '4']},
        index=[0, 1, 2, 3])

    # Random suffix so concurrent runs do not collide on the same file.
    filepath = '/df2gspread_tests/' + ''.join(
        random.choice(string.ascii_uppercase + string.digits)
        for _ in range(10))

    # First worksheet as default
    d2g.upload(df_upload, filepath)
    df_download = g2d.download(filepath, col_names=True, row_names=True)
    df_download.columns = df_download.columns.astype(np.int64)
    df_download.index = df_download.index.astype(np.int64)
    assert_frame_equal(df_upload, df_download)

    # Updating existed spreadsheet
    d2g.upload(df_upload, filepath, wks_name='Sheet2')
    df_download = g2d.download(filepath, col_names=True, row_names=True)
    df_download.columns = df_download.columns.astype(np.int64)
    df_download.index = df_download.index.astype(np.int64)
    assert_frame_equal(df_upload, df_download)

    # Updating with file_id
    credentials = get_credentials()
    file_id = get_file_id(credentials, filepath)
    d2g.upload(df_upload, file_id)
    df_download = g2d.download(file_id, col_names=True, row_names=True)
    df_download.columns = df_download.columns.astype(np.int64)
    df_download.index = df_download.index.astype(np.int64)
    assert_frame_equal(df_upload, df_download)

    # Only row_names
    wks = d2g.upload(df_upload, filepath, col_names=False)
    df_download = g2d.download(filepath, row_names=True)
    df_download.index = df_download.index.astype(np.int64)
    # df_download.columns = df_download.columns.astype(np.int64)

    assert_frame_equal(df_upload, df_download)

    # Only col_names
    wks = d2g.upload(df_upload, filepath, row_names=False)
    df_download = g2d.download(filepath, col_names=True)
    df_download.columns = df_download.columns.astype(np.int64)

    assert_frame_equal(df_upload, df_download)

    # Without column or row names
    wks = d2g.upload(df_upload, filepath, row_names=False, col_names=False)
    df_download = g2d.download(filepath)

    assert_frame_equal(df_upload, df_download)

    # Clear created file from drive
    delete_file(credentials, file_id)