def main():
  """Command line entry point: list sheets in, or convert, an excel workbook.

  Prints CSV to STDOUT; the user is expected to pipe output into a file.
  """

  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""\
      Command line to transform excel sheets into csv files.

      Prints to STDOUT, user is expected to pipe output into file.
      Typically used for BigQuery data imports.

      Examples:
        List sheets in workbook: python helper.py [EXCEL FILE] --list
        Convert excel to CSV: python helper.py [EXCEL FILE] --sheet [SHEET NAME] > results.csv

  """))

  parser.add_argument('workbook', help='name of file to pull the rows.')
  parser.add_argument('--sheet', help='Sheet to pull the rows.', default=None)
  parser.add_argument('--list', help='List reports.', action='store_true')

  # initialize project
  # BUG FIX: ('-v') is just the string '-v', not a tuple; ('-v',) passes an
  # actual sequence of flags as commandline_parser expects.
  parser = commandline_parser(parser, arguments=('-v',))

  args = parser.parse_args()
  config = Configuration(verbose=args.verbose)

  # excel parsers require a binary handle
  with open(args.workbook, 'rb') as excel_file:
    if args.list:
      # print each sheet name, one per line
      for sheet in excel_to_sheets(excel_file):
        print(sheet)
    elif args.sheet:
      # convert the requested sheet's rows to CSV and print
      for sheet, row in excel_to_rows(excel_file, args.sheet):
        print(rows_to_csv(row).read())
def main():
  """Load a recipe JSON, validate its fields, then execute or write it out.

  If --recipe_out is given, the resolved recipe is written to that file and
  the program exits without executing any tasks.
  """

  # load standard parameters
  parser = commandline_parser()
  parser.add_argument(
    '--recipe_out',
    '-rc',
    help='Path to recipe file to be written if replacing fields.',
    default=None)

  args = parser.parse_args()

  # load json to get each task
  recipe = get_recipe(args.json)

  # check if all fields have been converted to values
  validate(recipe, args.no_input)

  # check to write converted fields to stdout
  if args.recipe_out:
    print()
    print('Writing to:', args.recipe_out)
    # IMPROVEMENT: context manager guarantees the handle is closed even if
    # the write raises (original used open/write/close).
    with open(args.recipe_out, 'w') as recipe_file:
      recipe_file.write(json.dumps(recipe, sort_keys=True, indent=2))
    exit()

  # initialize the project singleton with passed in parameters
  configuration = Configuration(recipe, args.project, args.user, args.service,
                                args.client, args.json, args.key, args.verbose,
                                args.trace_print, args.trace_file)

  execute(configuration, recipe['tasks'], args.force, args.instance)
def main():
  """Send a JSON email template via gMail, or preview it on STDOUT.

  When both --email_to and --email_from are supplied the assembled message is
  sent; otherwise the HTML and text parts are printed for browser preview.
  """

  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""\
      Command line to send email template via gMail.

      Email templates are JSON that assembles into both HTMl and TXT parts of an email.
      For email sample see: https://github.com/google/starthinker/blob/master/starthinker/task/newsletter/sample.json

      Example:
        - Generate an HTML page from a template, then view via browser.
          python newsletter.py --template scripts/newsletter_sample.json > ~/Downloads/email.html

        - Send an email template via gMail.
          python newsletter.py --template scripts/newsletter_sample.json --email_to [email protected] --email_from [email protected] -u $STARTHINKER_USER

  """))

  # get parameters
  parser.add_argument(
    '--template',
    help='template to use for email',
    default=None,
    required=True)
  parser.add_argument('--email_to', help='email to', default=None)
  parser.add_argument('--email_from', help='email from', default=None)

  # initialize project
  parser = commandline_parser(parser, arguments=('-u', '-c', '-v'))
  args = parser.parse_args()

  config = Configuration(
    user=args.user,
    client=args.client,
    verbose=args.verbose
  )

  # load template
  with open(args.template, 'r') as template_file:
    template = EmailTemplate(json.load(template_file))

  # send or print
  sending = args.email_to and args.email_from
  if sending:
    print('EMAILING: ', args.email_to)
    send_email(
      config,
      'user',
      args.email_to,
      args.email_from,
      None,
      template.get_subject(),
      template.get_text(),
      template.get_html()
    )
  else:
    # write to STDOUT: html first, then the text part wrapped for readability
    print(template.get_html())
    print('<pre style="width:600px;margin:0px auto;">%s</pre>' % template.get_text())
def main():
  """Convert a recipe JSON into a Jupyter Notebook (Colab) document.

  Writes the notebook to --file_out when given, otherwise prints to STDOUT.
  """

  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""\
      Command line to turn recipe into Jupyter Notebook.

      Example:
        python colab.py [path to existing recipe.json] --fo [path to new jupyter file.ipynb]

  """))

  parser.add_argument('json', help='Path to recipe json file to load.')
  parser.add_argument(
    '--file_out',
    '-fo',
    help='Path to recipe file to be written if replacing fields.',
    default=None
  )

  # initialize project
  parser = commandline_parser(parser, arguments=('-p', '-c', '-u', '-s'))
  args = parser.parse_args()

  # load json to get each task
  recipe = get_recipe(args.json)

  # create Jupyter Notebook (Colab)
  notebook = recipe_to_colab(
    # take filename without extension of destination or source
    name=(args.file_out or args.json).rsplit('/', 1)[-1].split('.')[0],
    description=recipe.get('description'),
    instructions=recipe.get('instructions'),
    tasks=recipe['tasks'],
    project=args.project,
    client_credentials=args.client,
    user_credentials=args.user,
    service_credentials=args.service
  )

  # check to write converted fields to stdout
  if args.file_out:
    print('Writing to:', args.file_out)
    # IMPROVEMENT: context manager guarantees the handle is closed even if
    # the write raises (original used open/write/close).
    with open(args.file_out, 'w') as notebook_file:
      notebook_file.write(notebook)
  else:
    print(notebook)
def main():
  """Create USER credentials from CLIENT credentials and print the profile.

  Prompts the OAuth flow via commandline_parser's -c/-u handling, then calls
  get_profile to verify the resulting USER credentials work.
  """

  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    # FIXES: removed stray trailing '"' characters from several lines, and the
    # final example swapped -u/-c relative to Step 4A (-c is CLIENT, -u is USER).
    description=textwrap.dedent("""\
      Creates USER credentials from Google Cloud Project CLIENT Credentials and displays profile information if it worked.
      CLIENT credentials are required to run this script, to obtain the JSON file...

      Step 1: Configure Authentication Consent Screen ( do only once )
      ----------------------------------------
        A. Visit: https://console.developers.google.com/apis/credentials/consent
        B. Choose Internal if you have GSuite, otherwise choose External.
        C. For Application Name enter: StarThinker
        D. All other fields are optional, click Save.

      Step 2: Create CLIENT Credentials ( do only once )
      ----------------------------------------
        A. Visit: https://console.developers.google.com/apis/credentials/oauthclient
        B. Choose Desktop.
        C. For Name enter: StarThinker.
        D. Click Create and ignore the confirmation pop-up.

      Step 3: Download CLIENT Credentials File ( do only once )
      ----------------------------------------
        A. Visit: https://console.developers.google.com/apis/credentials
        B. Find your newly created key under OAuth 2.0 Client IDs and click download arrow on the right.
        C. The downloaded file is the CLIENT credentials, use its path for the --client -c parameter.

      Step 4: Generate USER Credentials File ( do only once )
      ----------------------------------------
        A. Run this command with parameters -c [CLIENT file path] and -u [USER file path].
        B. The USER file will be created and can be used to access Google APIs.
        C. The user profile will be printed to the screen

      Example: python helper.py -c [CLIENT file path] -u [USER file path]

  """))

  # initialize project
  parser = commandline_parser(parser, arguments=('-c', '-u'))
  args = parser.parse_args()

  config = Configuration(
    user=args.user,
    client=args.client
  )

  # get profile to verify everything worked
  print('Profile:', json.dumps(get_profile(config), indent=2, sort_keys=True))
def main():
  """Generic Google API command line: call any endpoint or dump its schema.

  Modes (mutually exclusive, checked in order):
    --object  print the resource's JSON discovery document.
    --struct  print the resource as a BigQuery structure.
    --schema  print the endpoint method as a BigQuery schema.
    default   execute the API call described by -api/-version/-function/-kwargs.
  """

  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""\
      Command line interface for running Google API calls.  Any API works.
      Allows developers to quickly test and debug API calls before building them into scripts.
      Useful for debugging permission or call errors.

      Examples:
        - Pull a DBM report via API.
          - https://developers.google.com/bid-manager/v1/queries/getquery
          - python google_api.py -api doubleclickbidmanager -version v1 -function queries.getquery -kwargs '{ "queryId": 132865172 }' -u [credentials path]

        - Pull a list of placements:
          - https://developers.google.com/doubleclick-advertisers/v3.3/placements/list
          - python google_api.py -api dfareporting -version v3.3 -function placements.list -kwargs '{ "profileId":2782211 }' -u [credentials path]

        - Show schema for Campaign Manager advertiser list endpoint.
          - https://developers.google.com/doubleclick-advertisers/v3.4/advertisers/list
          - python google_api.py -api dfareporting -version v3.4 -function advertisers.list --schema
          - python google_api.py -api dfareporting -version v3.4 -function Advertiser --object
          - python google_api.py -api dfareporting -version v3.4 -function Advertiser --struct

  """))

  # get parameters
  parser.add_argument('-api', help='api to run, name of product api')
  parser.add_argument('-version', help='version of api')
  parser.add_argument('-function', help='function or resource to call in api')
  parser.add_argument('-uri', help='uri to use in api', default=None)
  parser.add_argument(
    '-developer-token',
    help='developer token to pass in header',
    default=None)
  parser.add_argument(
    '-login-customer-id',
    help='customer to log in with when manipulating an MCC',
    default=None)
  parser.add_argument(
    '-kwargs',
    help='kwargs to pass to function, json string of name:value pairs')
  parser.add_argument('--iterate', help='force iteration', action='store_true')
  parser.add_argument(
    '--limit',
    type=int,
    help='optional, number of records to return',
    default=None)
  parser.add_argument(
    '--schema',
    help='return function as BigQuery schema, function = [endpoint.method]',
    action='store_true')
  parser.add_argument(
    '--object',
    help='return resource as JSON discovery document, function = [resource]',
    action='store_true')
  parser.add_argument(
    '--struct',
    help='return resource as BigQuery structure, function = [resource]',
    action='store_true')

  # initialize project
  parser = commandline_parser(parser, arguments=('-u', '-c', '-s', '-k', '-v'))
  args = parser.parse_args()
  config = Configuration(
    user=args.user,
    client=args.client,
    service=args.service,
    key=args.key,
    verbose=args.verbose)

  # show discovery document for the resource
  if args.object:
    print(json.dumps(
      Discovery_To_BigQuery(args.api, args.version).resource_json(args.function),
      indent=2,
      default=str))

  # show BigQuery structure for the resource
  elif args.struct:
    print(Discovery_To_BigQuery(
      args.api, args.version).resource_struct(args.function))

  # show schema
  elif args.schema:
    print(json.dumps(
      Discovery_To_BigQuery(args.api, args.version).method_schema(args.function),
      indent=2,
      default=str))

  # or fetch results
  else:

    # the api wrapper takes parameters as JSON
    job = {
      'auth': 'service' if args.service else 'user',
      'api': args.api,
      'version': args.version,
      'function': args.function,
      'key': args.key,
      'uri': args.uri,
      # ROBUSTNESS FIX: json.loads(None) raises TypeError when -kwargs is
      # omitted; treat a missing flag as "no parameters".
      'kwargs': json.loads(args.kwargs) if args.kwargs else {},
      'headers': {},
      'iterate': args.iterate,
      'limit': args.limit,
    }

    # optional headers used by Google Ads style APIs
    if args.developer_token:
      job['headers']['developer-token'] = args.developer_token
    if args.login_customer_id:
      job['headers']['login-customer-id'] = args.login_customer_id

    # run the API call
    results = API(job).execute()

    # display results
    if args.iterate:
      for result in results:
        pprint.PrettyPrinter().pprint(result)
    else:
      pprint.PrettyPrinter().pprint(results)
def main():
  """Debug helper for Campaign Manager reports.

  Modes (mutually exclusive, checked in order): --report fetches a report's
  JSON definition, --files lists its generated files, --schema derives a
  BigQuery schema from the latest file, --sample prints the first 20 rows,
  and the default lists all reports for the account.
  """

  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""\
      Command line to help debug CM reports and build reporting tools.

      Examples:
        To get list of reports: python cm.py --account [id] --list -u [user credentials path]
        To get report: python cm.py --account [id] --report [id] -u [user credentials path]
        To get report files: python cm.py --account [id] --files [id] -u [user credentials path]
        To get report sample: python cm.py --account [id] --sample [id] -u [user credentials path]
        To get report schema: python cm.py --account [id] --schema [id] -u [user credentials path]

  """))

  parser.add_argument(
    '--account', help='Account ID to use to pull the report.', default=None)
  parser.add_argument(
    '--report', help='Report ID to pull JSON definition.', default=None)
  # TYPO FIX in help text: 'achema' -> 'schema'
  parser.add_argument(
    '--schema', help='Report ID to pull schema definition.', default=None)
  parser.add_argument(
    '--sample', help='Report ID to pull sample data.', default=None)
  parser.add_argument(
    '--files', help='Report ID to pull file list.', default=None)
  parser.add_argument('--list', help='List reports.', action='store_true')

  # initialize project
  parser = commandline_parser(parser, arguments=('-u', '-c', '-s', '-v'))
  args = parser.parse_args()
  config = Configuration(
    user=args.user,
    client=args.client,
    service=args.service,
    verbose=args.verbose)

  auth = 'service' if args.service else 'user'

  is_superuser, profile = get_profile_for_api(config, auth, args.account)

  # superusers scope calls with an explicit accountId; regular users only pass profileId
  kwargs = {
    'profileId': profile,
    'accountId': args.account
  } if is_superuser else {
    'profileId': profile
  }

  # get report definition
  if args.report:
    kwargs['reportId'] = args.report
    report = API_DCM(
      config, auth, internal=is_superuser).reports().get(**kwargs).execute()
    print(json.dumps(report, indent=2, sort_keys=True))

  # get report files
  elif args.files:
    kwargs['reportId'] = args.files
    for rf in API_DCM(
        config, auth, internal=is_superuser,
        iterate=True).reports().files().list(**kwargs).execute():
      print(json.dumps(rf, indent=2, sort_keys=True))

  # get schema: derive it from the header row of the latest report file
  elif args.schema:
    filename, report = report_file(config, auth, args.account, args.schema,
                                   None, 10)
    rows = report_to_rows(report)
    rows = report_clean(rows)
    print(json.dumps(report_schema(next(rows)), indent=2, sort_keys=True))

  # get sample: print up to the first 20 typed rows
  elif args.sample:
    filename, report = report_file(config, auth, args.account, args.sample,
                                   None, 10)
    rows = report_to_rows(report)
    rows = report_clean(rows)
    rows = rows_to_type(rows)
    for r in rows_print(rows, row_min=0, row_max=20):
      pass

  # get list
  else:
    for report in API_DCM(
        config, auth, internal=is_superuser,
        iterate=True).reports().list(**kwargs).execute():
      print(json.dumps(report, indent=2, sort_keys=True))
def main():
  """Inspect DV360 reports from the command line.

  Modes (mutually exclusive, checked in order): --report prints a query's
  JSON definition, --schema derives a schema from the report file, --sample
  prints the first 20 rows, and the default lists all queries.
  """

  argument_parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""\
      Command line to help debug DV360 reports and build reporting tools.

      Examples:
        To get list of reports: python dv.py --list -u [user credentials path]
        To get report json: python dv.py --report [id] -u [user credentials path]
        To get report schema: python dv.py --schema [id] -u [user credentials path]
        To get report sample: python dv.py --sample [id] -u [user credentials path]

  """))

  # create parameters
  argument_parser.add_argument(
    '--report', help='report ID to pull json definition', default=None)
  argument_parser.add_argument(
    '--schema', help='report ID to pull schema format', default=None)
  argument_parser.add_argument(
    '--sample', help='report ID to pull sample data', default=None)
  argument_parser.add_argument(
    '--list', help='list reports', action='store_true')

  # initialize project
  argument_parser = commandline_parser(
    argument_parser, arguments=('-u', '-c', '-s', '-v'))
  options = argument_parser.parse_args()

  conf = Configuration(
    user=options.user,
    client=options.client,
    service=options.service,
    verbose=options.verbose)

  # service credentials take precedence over user credentials
  auth = 'service' if options.service else 'user'

  if options.report:
    # fetch and pretty print a single query definition
    definition = API_DBM(
      conf, auth).queries().getquery(queryId=options.report).execute()
    print(json.dumps(definition, indent=2, sort_keys=True))

  elif options.schema:
    # download the report, clean and type the rows, then derive a schema
    filename, report = report_file(conf, auth, options.schema, None, 10)
    typed_rows = rows_to_type(report_clean(report_to_rows(report)))
    print(json.dumps(get_schema(typed_rows)[1], indent=2, sort_keys=True))

  elif options.sample:
    # download the report and print up to the first 20 typed rows
    filename, report = report_file(conf, auth, options.sample, None, 10)
    typed_rows = rows_to_type(report_clean(report_to_rows(report)))
    for _ in rows_print(typed_rows, row_min=0, row_max=20):
      pass

  else:
    # no specific report requested, list all queries
    for entry in API_DBM(
        conf, auth, iterate=True).queries().listqueries().execute():
      print(json.dumps(entry, indent=2, sort_keys=True))
def main():
  """BigQuery helper: upload CSV/Excel rows to a table or print a table schema.

  With --csv or --excel_workbook/--excel_sheet, loads the rows into the given
  dataset.table; if no --schema is supplied the schema is detected, printed,
  and the program exits so the user can re-run with it.  With neither source,
  prints the existing table's schema.
  """

  # get parameters
  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    # FIX in help text: the flag is --excel_workbook, not --excel_file.
    description=textwrap.dedent("""\
      Command line to get table schema from BigQuery.

      Helps developers upload data to BigQuery and pull schemas.  These are the most common BigQuery tasks when developing solutions.

      Examples:
        Display table schema: `python helper.py --project [id] --dataset [name] --table [name] -s [credentials]`
        Upload csv table: `python helper.py --project [id] --dataset [name] --table [name] --csv [file] --schema [file] -s [credentials]`
        Upload excel sheet: `python helper.py --project [id] --dataset [name] --table [name] --excel_workbook [file] --excel_sheet [name] --schema [file] -s [credentials]`

  """))

  parser.add_argument('--dataset', help='name of BigQuery dataset', default=None)
  parser.add_argument('--table', help='name of BigQuery table', default=None)
  parser.add_argument('--csv', help='CSV file path', default=None)
  # NOTE(review): despite the help text, the value is parsed with json.loads
  # below, i.e. an inline JSON string rather than a file path — confirm intent.
  parser.add_argument('--schema', help='SCHEMA file path', default=None)
  parser.add_argument('--excel_workbook', help='Excel file path', default=None)
  parser.add_argument('--excel_sheet', help='Excel sheet name', default=None)

  # initialize project
  parser = commandline_parser(parser, arguments=('-u', '-c', '-s', '-v', '-p'))
  args = parser.parse_args()
  config = Configuration(
    user=args.user,
    client=args.client,
    service=args.service,
    verbose=args.verbose,
    project=args.project)

  auth = 'service' if args.service else 'user'

  schema = json.loads(args.schema) if args.schema else None

  if args.csv:
    with open(args.csv, 'r') as csv_file:
      rows = csv_to_rows(csv_file.read())

      if not schema:
        rows, schema = get_schema(rows)
        # TYPO FIX: 'DETECETED' -> 'DETECTED'
        print('DETECTED SCHEMA', json.dumps(schema))
        print('Please run again with the above schema provided.')
        exit()

      rows_to_table(config, auth, config.project, args.dataset, args.table,
                    rows, schema)

  elif args.excel_workbook and args.excel_sheet:
    # BUG FIX: excel parsers need a binary handle; text mode ('r') fails to
    # decode the workbook bytes.  Matches the 'rb' excel handling elsewhere.
    with open(args.excel_workbook, 'rb') as excel_file:
      rows = excel_to_rows(excel_file, args.excel_sheet)

      if not schema:
        rows, schema = get_schema(rows)
        print('DETECTED SCHEMA', json.dumps(schema))
        print('Please run again with the above schema provided.')
        exit()

      rows_to_table(config, auth, config.project, args.dataset, args.table,
                    rows, schema)

  else:
    # print schema of the existing table
    print(json.dumps(
      table_to_schema(config, auth, config.project, args.dataset, args.table),
      indent=2))