def main():
  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent('''\
      Command line to help debug DV360 reports and build reporting tools.

      Examples:
        To get list of reports: python helper.py --list -u [user credentials path]
        To get report json: python helper.py --report [id] -u [user credentials path]
        To get report schema: python helper.py --schema [id] -u [user credentials path]
        To get report sample: python helper.py --sample [id] -u [user credentials path]
  '''))

  # create parameters
  parser.add_argument('--report', help='report ID to pull json definition', default=None)
  parser.add_argument('--schema', help='report ID to pull schema format', default=None)
  parser.add_argument('--sample', help='report ID to pull sample data', default=None)
  parser.add_argument('--list', help='list reports', action='store_true')

  # initialize project
  project.from_commandline(parser=parser, arguments=('-u', '-c', '-s', '-v'))
  auth = 'service' if project.args.service else 'user'

  # get report
  if project.args.report:
    report = API_DBM(auth).queries().getquery(queryId=project.args.report).execute()
    print(json.dumps(report, indent=2, sort_keys=True))

  # get schema
  elif project.args.schema:
    filename, report = report_file(auth, project.args.schema, None, 10)
    rows = report_to_rows(report)
    rows = report_clean(rows)
    rows = rows_to_type(rows)
    print(json.dumps(get_schema(rows)[1], indent=2, sort_keys=True))

  # get sample
  elif project.args.sample:
    filename, report = report_file(auth, project.args.sample, None, 10)
    rows = report_to_rows(report)
    rows = report_clean(rows)
    rows = rows_to_type(rows)
    for r in rows_print(rows, row_min=0, row_max=20):
      pass

  # get list
  else:
    for report in API_DBM(auth, iterate=True).queries().listqueries().execute():
      print(json.dumps(report, indent=2, sort_keys=True))
def floodlight_rows(
  config,
  task: dict,
  report_id: int
) -> Generator[list, None, None]:
  """Monitor a report for completion and return its rows.

  Args:
    config - StarThinker configuration object.
    task - task JSON carrying the auth and account values.
    report_id - the report created earlier for a specific floodlight id.

  Returns:
    A stream of rows, see FLOODLIGHT_* constants for column definitions.
  """

  # fetch report file if it exists
  filename, report = report_file(
    config,
    task['auth'],
    task['account'],
    report_id,
    None,  # no name
    10  # wait up to 10 minutes for report to complete
  )

  # clean up rows
  rows = report_to_rows(report)
  rows = report_clean(rows)
  rows = rows_header_trim(rows)
  rows = rows_to_type(rows, column=6)

  return rows
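# A minimal usage sketch for floodlight_rows above, assuming a StarThinker
# style config object and a recipe task dict with 'auth' and 'account' keys;
# the account and report id values are hypothetical placeholders.
def floodlight_rows_example(config):
  task = {'auth': 'user', 'account': 12345}  # hypothetical account id
  for row in floodlight_rows(config, task, report_id=67890):  # hypothetical report id
    print(row)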
def create_cm_site_segmentation(config, task):
  # Read sheet to bq table
  sheet_rows = sheets_read(config, task['auth_sheets'], task['sheet'], 'CM_Site_Segments', 'A:C', retries=10)
  if not sheet_rows:
    sheet_rows = []

  schema = [
    {"type": "STRING", "name": "Site_Dcm", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Impressions", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Site_Type", "mode": "NULLABLE"}
  ]

  rows_to_table(
    config,
    auth=task['auth_bq'],
    project_id=config.project,
    dataset_id=task['dataset'],
    table_id=CM_SITE_SEGMENTATION_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE'
  )

  # Get Site_Type from the sheet
  run_query_from_file(config, task, Queries.cm_site_segmentation.replace('{{dataset}}', task['dataset']), CM_SITE_SEGMENTATION_TABLE)

  # Move table back to sheets
  query = 'SELECT * FROM `' + config.project + '.' + task['dataset'] + '.' + CM_SITE_SEGMENTATION_TABLE + '`'
  rows = query_to_rows(config, task['auth_bq'], config.project, task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  rows = rows_to_type(rows)

  sheets_clear(config, task['auth_sheets'], task['sheet'], 'CM_Site_Segments', 'A2:C')
  sheets_write(config, task['auth_sheets'], task['sheet'], 'CM_Site_Segments', 'A2:C', rows)
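# A hedged sketch of the task dict create_cm_site_segmentation(config, task)
# reads above, using only the keys accessed in the function body; every value
# is a hypothetical placeholder.
CM_SITE_SEGMENTATION_TASK = {
  'auth_sheets': 'user',  # credentials used for sheet reads and writes
  'auth_bq': 'service',  # credentials used for BigQuery writes
  'sheet': 'https://docs.google.com/spreadsheets/d/EXAMPLE_SHEET_ID/edit',
  'dataset': 'Example_Dataset'
}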
def sheets():
  rows = sheets_read(project.task['auth'], project.task['sheets']['url'], project.task['sheets']['tab'], project.task['sheets']['range'])
  rows = rows_to_type(rows)
  object_compare(list(rows), project.task['sheets']['values'])
def sheets(config, task):
  print('TEST: sheets')
  rows = sheets_read(config, task['auth'], task['sheets']['sheet'], task['sheets']['tab'], task['sheets']['range'])
  rows = rows_to_type(rows)
  object_compare(list(rows), task['sheets']['values'])
def create_cm_site_segmentation(project):
  # Read sheet to bq table
  sheet_rows = sheets_read('user', project.task['sheet'], 'CM_Site_Segments', 'A:C', retries=10)
  if not sheet_rows:
    sheet_rows = []

  schema = [
    {"type": "STRING", "name": "Site_Dcm", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Impressions", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Site_Type", "mode": "NULLABLE"}
  ]

  rows_to_table(
    auth='service',
    project_id=project.id,
    dataset_id=project.task['dataset'],
    table_id=CM_SITE_SEGMENTATION_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE')

  # Get Site_Type from the sheet
  run_query_from_file(
    os.path.join(os.path.dirname(__file__), SQL_DIRECTORY + CM_SITE_SEGMENTATION_FILENAME),
    project.id,
    project.task['dataset'],
    CM_SITE_SEGMENTATION_TABLE)

  # Move table back to sheets
  query = 'SELECT * FROM `' + project.id + '.' + project.task['dataset'] + '.' + CM_SITE_SEGMENTATION_TABLE + '`'
  rows = query_to_rows('service', project.id, project.task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  rows = rows_to_type(rows)

  sheets_clear('user', project.task['sheet'], 'CM_Site_Segments', 'A2:C')
  sheets_write('user', project.task['sheet'], 'CM_Site_Segments', 'A2:C', rows)
def create_dv360_segments(config, task):
  a1_notation = 'A:N'

  schema = [
    {"type": "STRING", "name": "Advertiser", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Advertiser_Id", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Campaign", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Campaign_Id", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Insertion_Order", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Insertion_Order_Id", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Line_Item", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Line_Item_Id", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Line_Item_Type", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Impressions", "mode": "NULLABLE"},
    {"type": "STRING", "name": "SegmentAutoGen", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Segment1", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Segment2", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Segment3", "mode": "NULLABLE"}
  ]

  sheet_rows = sheets_read(config, task['auth_sheets'], task['sheet'], 'DV3 Segments', a1_notation, retries=10)
  if not sheet_rows:
    sheet_rows = []

  print('DV360 SEGMENT SHEET TABLE WRITE')

  rows_to_table(
    config,
    auth=task['auth_bq'],
    project_id=config.project,
    dataset_id=task['dataset'],
    table_id=DV360_CUSTOM_SEGMENTS_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE'
  )

  # Run query
  if config.verbose:
    print('RUN DV360 Custom Segments Query')
  run_query_from_file(config, task, Queries.dv360_custom_segments.replace('{{dataset}}', task['dataset']), DV360_CUSTOM_SEGMENTS_TABLE)

  # Move table back to sheets
  query = 'SELECT * FROM `' + config.project + '.' + task['dataset'] + '.' + DV360_CUSTOM_SEGMENTS_TABLE + '`'
  rows = query_to_rows(config, task['auth_bq'], config.project, task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  a1_notation = a1_notation[:1] + '2' + a1_notation[1:]
  rows = rows_to_type(rows)

  sheets_clear(config, task['auth_sheets'], task['sheet'], 'DV3 Segments', a1_notation)
  sheets_write(config, task['auth_sheets'], task['sheet'], 'DV3 Segments', a1_notation, rows)
def floodlight_rows(report_id):
  # fetch report file if it exists
  filename, report = report_file(project.task['auth'], project.task['account'], report_id, None, 10)

  # clean up rows
  rows = report_to_rows(report)
  rows = report_clean(rows)
  rows = rows_header_trim(rows)
  rows = rows_to_type(rows, column=6)

  return rows
def floodlight_monitor():
  if project.verbose:
    print("FLOODLIGHT MONITOR")

  # make sure tab exists in sheet ( deprecated, use sheet task instead )
  if 'template' in project.task['sheet']:
    sheets_tab_copy(
      project.task['auth'],
      project.task['sheet']['template']['sheet'],
      project.task['sheet']['template']['tab'],
      project.task['sheet']['sheet'],
      project.task['sheet']['tab'])

  # read triggers from sheet
  # 0 - Floodlight Id
  # 1 - email
  triggers = sheets_read(
    project.task['auth'],
    project.task['sheet']['sheet'],
    project.task['sheet']['tab'],
    project.task['sheet']['range'])

  if project.verbose and len(triggers) == 0:
    print("FLOODLIGHT MONITOR: No floodlight ids specified in sheet.")

  alerts = {}
  day = None

  for trigger in triggers:

    # get report data for each floodlight
    report = floodlight_report(trigger[0])
    rows = report_to_rows(report)
    rows = report_clean(rows)
    rows = rows_header_trim(rows)
    rows = rows_to_type(rows, column=6)

    # calculate outliers
    last_day, rows = floodlight_analysis(rows)

    if last_day:
      # find last day report ran
      day = last_day if day is None else max(day, last_day)

      # group alerts by email
      alerts.setdefault(trigger[1], [])
      alerts[trigger[1]].extend(rows)

  if alerts:
    floodlight_email(day, alerts)
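# A hedged sketch of the trigger rows floodlight_monitor() expects from the
# sheet range: column 0 is a floodlight id, column 1 an alert email, per the
# column comments above. Both values are hypothetical placeholders.
EXAMPLE_FLOODLIGHT_TRIGGERS = [
  ['1234567', 'analyst@example.com'],
  ['7654321', 'manager@example.com'],
]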
def create_cm_site_segmentation(project):
  # Read sheet to bq table
  sheet_rows = sheets_read('user', project.task['sheet'], 'CM_Site_Segments', 'A:C', retries=10)
  if not sheet_rows:
    sheet_rows = []

  schema = [
    {'type': 'STRING', 'name': 'Site_Dcm', 'mode': 'NULLABLE'},
    {'type': 'INTEGER', 'name': 'Impressions', 'mode': 'NULLABLE'},
    {'type': 'STRING', 'name': 'Site_Type', 'mode': 'NULLABLE'}
  ]

  rows_to_table(
    auth='service',
    project_id=project.id,
    dataset_id=project.task['dataset'],
    table_id=CM_SITE_SEGMENTATION_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE')

  # Get Site_Type from the sheet
  run_query_from_file(Queries.cm_site_segmentation, CM_SITE_SEGMENTATION_TABLE)

  # Move table back to sheets
  query = 'SELECT * FROM `' + project.id + '.' + project.task['dataset'] + '.' + CM_SITE_SEGMENTATION_TABLE + '`'
  rows = query_to_rows('service', project.id, project.task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  rows = rows_to_type(rows)

  sheets_clear('user', project.task['sheet'], 'CM_Site_Segments', 'A2:C')
  sheets_write('user', project.task['sheet'], 'CM_Site_Segments', 'A2:C', rows)
# initialize project
project.from_commandline(parser=parser)
auth = 'service' if project.args.service else 'user'

# get report
if project.args.report:
  report = API_DBM(auth).queries().getquery(queryId=project.args.report).execute()
  print(json.dumps(report, indent=2, sort_keys=True))

# get schema
elif project.args.schema:
  filename, report = report_file(auth, project.args.schema, None, 10)
  rows = report_to_rows(report)
  rows = report_clean(rows)
  rows = rows_to_type(rows)
  print(json.dumps(get_schema(rows)[1], indent=2, sort_keys=True))

# get sample
elif project.args.sample:
  filename, report = report_file(auth, project.args.sample, None, 10)
  rows = report_to_rows(report)
  rows = report_clean(rows)
  rows = rows_to_type(rows)
  for r in rows_print(rows, row_min=0, row_max=20):
    pass

# get list
else:
  for report in API_DBM(auth, iterate=True).queries().listqueries().execute():
    print(json.dumps(report, indent=2, sort_keys=True))
def bigquery_query():
  """Execute a query and write results to table.

  TODO: Replace with get_rows and put_rows combination.

  See:
    scripts/bigquery_query.json
    scripts/bigquery_storage.json
    scripts/bigquery_to_sheet.json
    scripts/bigquery_view.json
  """

  if 'table' in project.task['to']:
    if project.verbose:
      print('QUERY TO TABLE', project.task['to']['table'])

    query_to_table(
      project.task['auth'],
      project.id,
      project.task['to']['dataset'],
      project.task['to']['table'],
      query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
      disposition=project.task['write_disposition'] if 'write_disposition' in project.task else 'WRITE_TRUNCATE',
      legacy=project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
      target_project_id=project.task['to'].get('project_id', project.id))

  elif 'sheet' in project.task['to']:
    if project.verbose:
      print('QUERY TO SHEET', project.task['to']['sheet'])

    rows = query_to_rows(
      project.task['auth'],
      project.id,
      project.task['from']['dataset'],
      query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
      legacy=project.task['from'].get('legacy', True))

    # makes sure types are correct in sheet
    rows = rows_to_type(rows)

    sheets_clear(
      project.task['to'].get('auth', project.task['auth']),
      project.task['to']['sheet'],
      project.task['to']['tab'],
      project.task['to'].get('range', 'A2'))
    sheets_write(
      project.task['to'].get('auth', project.task['auth']),
      project.task['to']['sheet'],
      project.task['to']['tab'],
      project.task['to'].get('range', 'A2'),
      rows)

  elif 'sftp' in project.task['to']:
    if project.verbose:
      print('QUERY TO SFTP')

    rows = query_to_rows(
      project.task['auth'],
      project.id,
      project.task['from']['dataset'],
      query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
      legacy=project.task['from'].get('use_legacy_sql', True))

    if rows:
      put_rows(project.task['auth'], project.task['to'], rows)

  else:
    if project.verbose:
      print('QUERY TO VIEW', project.task['to']['view'])

    query_to_view(
      project.task['auth'],
      project.id,
      project.task['to']['dataset'],
      project.task['to']['view'],
      query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
      project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
      project.task['to'].get('replace', False))
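# A hedged sketch of a task bigquery_query() consumes, using only the keys
# read above ('from' query plus a 'to' table destination); the dataset,
# table, and query values are hypothetical placeholders.
BIGQUERY_QUERY_TASK = {
  'auth': 'service',
  'from': {
    'query': 'SELECT * FROM `Example_Dataset.Example_Table`',  # hypothetical query
    'legacy': False
  },
  'to': {
    'dataset': 'Example_Dataset',  # hypothetical
    'table': 'Example_Output'  # hypothetical
  }
}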
def main():
  parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent("""\
      Command line to help debug CM reports and build reporting tools.

      Examples:
        To get list of reports: python cm.py --account [id] --list -u [user credentials path]
        To get report: python cm.py --account [id] --report [id] -u [user credentials path]
        To get report files: python cm.py --account [id] --files [id] -u [user credentials path]
        To get report sample: python cm.py --account [id] --sample [id] -u [user credentials path]
        To get report schema: python cm.py --account [id] --schema [id] -u [user credentials path]
  """))

  parser.add_argument('--account', help='Account ID to use to pull the report.', default=None)
  parser.add_argument('--report', help='Report ID to pull JSON definition.', default=None)
  parser.add_argument('--schema', help='Report ID to pull schema definition.', default=None)
  parser.add_argument('--sample', help='Report ID to pull sample data.', default=None)
  parser.add_argument('--files', help='Report ID to pull file list.', default=None)
  parser.add_argument('--list', help='List reports.', action='store_true')

  # initialize project
  parser = commandline_parser(parser, arguments=('-u', '-c', '-s', '-v'))
  args = parser.parse_args()
  config = Configuration(
    user=args.user,
    client=args.client,
    service=args.service,
    verbose=args.verbose)

  auth = 'service' if args.service else 'user'

  is_superuser, profile = get_profile_for_api(config, auth, args.account)
  kwargs = {'profileId': profile, 'accountId': args.account} if is_superuser else {'profileId': profile}

  # get report
  if args.report:
    kwargs['reportId'] = args.report
    report = API_DCM(config, auth, internal=is_superuser).reports().get(**kwargs).execute()
    print(json.dumps(report, indent=2, sort_keys=True))

  # get report files
  elif args.files:
    kwargs['reportId'] = args.files
    for rf in API_DCM(config, auth, internal=is_superuser, iterate=True).reports().files().list(**kwargs).execute():
      print(json.dumps(rf, indent=2, sort_keys=True))

  # get schema
  elif args.schema:
    filename, report = report_file(config, auth, args.account, args.schema, None, 10)
    rows = report_to_rows(report)
    rows = report_clean(rows)
    print(json.dumps(report_schema(next(rows)), indent=2, sort_keys=True))

  # get sample
  elif args.sample:
    filename, report = report_file(config, auth, args.account, args.sample, None, 10)
    rows = report_to_rows(report)
    rows = report_clean(rows)
    rows = rows_to_type(rows)
    for r in rows_print(rows, row_min=0, row_max=20):
      pass

  # get list
  else:
    for report in API_DCM(config, auth, internal=is_superuser, iterate=True).reports().list(**kwargs).execute():
      print(json.dumps(report, indent=2, sort_keys=True))
def sheets():
  if project.verbose:
    print('SHEETS')

  # clear if specified
  if project.task.get('clear', False):
    sheets_clear(project.task['auth'], project.task['sheet'], project.task['tab'], project.task['range'])

  # delete if specified ( after clear to prevent errors in case both are given )
  if project.task.get('delete', False):
    sheets_tab_delete(project.task['auth'], project.task['sheet'], project.task['tab'])

  # create or copy if specified
  if 'template' in project.task:
    sheets_tab_copy(
      project.task['auth'],
      project.task['template']['sheet'],
      project.task['template']['tab'],
      project.task['sheet'],
      project.task['tab'])
  else:
    sheets_tab_create(project.task['auth'], project.task['sheet'], project.task['tab'])

  # write data if specified
  if 'write' in project.task:
    rows = get_rows(project.task['auth'], project.task['write'])
    sheets_write(project.task['auth'], project.task['sheet'], project.task['tab'], project.task['range'], rows)

  # move if specified
  if 'out' in project.task:
    rows = sheets_read(project.task['auth'], project.task['sheet'], project.task['tab'], project.task['range'])

    if rows:
      schema = None

      # RECOMMENDED: define schema in json
      if project.task['out']['bigquery'].get('schema'):
        if project.verbose:
          print('SHEETS SCHEMA DEFINED')
        schema = project.task['out']['bigquery']['schema']

      # NOT RECOMMENDED: determine schema if missing
      else:
        if project.verbose:
          print('SHEETS SCHEMA DETECT ( Not Recommended - Define Schema In JSON )')
        # cast rows to types ( for schema detection )
        rows = rows_to_type(rows)
        rows, schema = get_schema(
          rows,
          project.task.get('header', False),
          infer_type=project.task.get('infer_type', True))

      # write to table ( not using put because no use cases for other destinations )
      rows_to_table(
        auth=project.task['out'].get('auth', project.task['auth']),
        project_id=project.id,
        dataset_id=project.task['out']['bigquery']['dataset'],
        table_id=project.task['out']['bigquery']['table'],
        rows=rows,
        schema=schema,
        skip_rows=1 if project.task.get('header', False) else 0,
        disposition=project.task['out']['bigquery'].get('disposition', 'WRITE_TRUNCATE'))

    else:
      print('SHEET EMPTY')
def put_rows(auth, destination, rows, schema=None, variant=''):
  """Processes a standard write JSON block for dynamic export of data.

  Allows us to quickly write the results of a script to a destination. For
  example, write the results of a DCM report into BigQuery.

  - Will write to multiple destinations if specified.
  - Extensible, add a handler to define a new destination ( be kind, update the documentation json ).

  Include the following JSON in a recipe, then in the run.py handler, when
  encountering that block, pass it to this function along with the rows.

    from utils.data import put_rows

    var_json = {
      "out":{
        "bigquery":{
          "auth":"[ user or service ]",
          "dataset": [ string ],
          "table": [ string ],
          "schema": [ json - standard bigquery schema json ],
          "header": [ boolean - true if header exists in rows ],
          "disposition": [ string - same as BigQuery documentation ]
        },
        "sheets":{
          "auth":"[ user or service ]",
          "sheet":[ string - full URL, suggest using share link ],
          "tab":[ string ],
          "range":[ string - A1:A notation ],
          "append": [ boolean - if sheet range should be appended to ],
          "delete": [ boolean - if sheet range should be cleared before writing ]
        },
        "storage":{
          "auth":"[ user or service ]",
          "bucket": [ string ],
          "path": [ string ]
        },
        "file":[ string - full path to place to write file ]
      }
    }

    put_rows('user', var_json['out'], rows)

  Or you can use it directly with the project singleton.

    from util.project import project
    from utils.data import put_rows

    @project.from_parameters
    def something():
      put_rows(project.task['auth'], project.task['out'], rows)

    if __name__ == "__main__":
      something()

  Args:
    auth: (string) The type of authentication to use, user or service.
    destination: (json) A json block resembling var_json described above.
    rows: (iterator) The rows to be written; if None, no action is performed.
    schema: (json) A bigquery schema definition.
    variant: (string) Appended to destination names to differentiate multiple objects.

  Returns:
    None, the rows are written to each destination specified.
  """

  if rows is None:
    if project.verbose:
      print('PUT ROWS: Rows is None, ignoring write.')
    return

  if 'bigquery' in destination:

    if not schema:
      schema = destination['bigquery'].get('schema')

    skip_rows = 1 if destination['bigquery'].get('header') and schema else 0

    if destination['bigquery'].get('format', 'CSV') == 'JSON':
      json_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        schema,
        destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

    elif destination['bigquery'].get('is_incremental_load', False) == True:
      incremental_rows_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        schema,
        destination['bigquery'].get('skip_rows', skip_rows),
        destination['bigquery'].get('disposition', 'WRITE_APPEND'),
        billing_project_id=project.id)

    else:
      rows_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        schema,
        destination['bigquery'].get('skip_rows', skip_rows),
        destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

  if 'sheets' in destination:
    if destination['sheets'].get('delete', False):
      sheets_clear(
        destination['sheets'].get('auth', auth),
        destination['sheets']['sheet'],
        destination['sheets']['tab'] + variant,
        destination['sheets']['range'],
      )

    sheets_write(
      destination['sheets'].get('auth', auth),
      destination['sheets']['sheet'],
      destination['sheets']['tab'] + variant,
      destination['sheets']['range'],
      rows_to_type(rows),
      destination['sheets'].get('append', False),
    )

  if 'file' in destination:
    path_out, file_ext = destination['file'].rsplit('.', 1)
    file_out = path_out + variant + '.' + file_ext
    if project.verbose:
      print('SAVING', file_out)
    makedirs_safe(parse_path(file_out))
    with open(file_out, 'w') as save_file:
      save_file.write(rows_to_csv(rows).read())

  if 'storage' in destination and destination['storage'].get('bucket') and destination['storage'].get('path'):
    bucket_create(
      destination['storage'].get('auth', auth),
      project.id,
      destination['storage']['bucket']
    )

    # put the file
    file_out = destination['storage']['bucket'] + ':' + destination['storage']['path'] + variant
    if project.verbose:
      print('SAVING', file_out)
    object_put(auth, file_out, rows_to_csv(rows))

  if 'sftp' in destination:
    try:
      cnopts = pysftp.CnOpts()
      cnopts.hostkeys = None

      path_out, file_ext = destination['sftp']['file'].rsplit('.', 1)
      file_out = path_out + variant + '.' + file_ext

      sftp = pysftp.Connection(
        host=destination['sftp']['host'],
        username=destination['sftp']['username'],
        password=destination['sftp']['password'],
        port=destination['sftp']['port'],
        cnopts=cnopts)

      if '/' in file_out:
        dir_out, file_out = file_out.rsplit('/', 1)
        sftp.cwd(dir_out)

      sftp.putfo(rows_to_csv(rows), file_out)

    except Exception as e:
      print(str(e))
      traceback.print_exc()
def bigquery():

  if 'run' in project.task and 'query' in project.task.get('run', {}):
    if project.verbose:
      print("QUERY", project.task['run']['query'])
    run_query(
      project.task['auth'],
      project.id,
      project.task['run']['query'],
      project.task['run'].get('legacy', True),
      #project.task['run'].get('billing_project_id', None)
    )

  elif 'values' in project.task['from']:
    rows = get_rows(project.task['auth'], project.task['from'])
    rows_to_table(
      project.task['to'].get('auth', project.task['auth']),
      project.id,
      project.task['to']['dataset'],
      project.task['to']['table'],
      rows,
      project.task.get('schema', []),
      0)

  elif 'query' in project.task['from']:

    if 'table' in project.task['to']:
      if project.verbose:
        print("QUERY TO TABLE", project.task['to']['table'])

      if 'pre_process_query' in project.task['to']:
        print('executing statement')
        execute_statement(
          project.task['auth'],
          project.id,
          project.task['to']['dataset'],
          project.task['to']['pre_process_query'],
          use_legacy_sql=project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)))

      query_to_table(
        project.task['auth'],
        project.id,
        project.task['to']['dataset'],
        project.task['to']['table'],
        query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
        disposition=project.task['write_disposition'] if 'write_disposition' in project.task else 'WRITE_TRUNCATE',
        legacy=project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
        target_project_id=project.task['to'].get('project_id', project.id))

    # NOT USED SO RIPPING IT OUT
    # Mauriciod: Yes, it is used, look at project/mauriciod/target_winrate.json
    elif 'storage' in project.task['to']:
      if project.verbose:
        print("QUERY TO STORAGE", project.task['to']['storage'])
      local_file_name = '/tmp/%s' % str(uuid.uuid1())
      rows = query_to_rows(project.task['auth'], project.id, project.task['from']['dataset'], project.task['from']['query'])

      f = open(local_file_name, 'w', newline='')  # text mode so csv.writer works in Python 3
      writer = csv.writer(f)
      writer.writerows(rows)
      f.close()

      f = open(local_file_name, 'rb')
      object_put(project.task['auth'], project.task['to']['storage'], f)
      f.close()

      os.remove(local_file_name)

    elif 'sheet' in project.task['to']:
      if project.verbose:
        print("QUERY TO SHEET", project.task['to']['sheet'])
      rows = query_to_rows(
        project.task['auth'],
        project.id,
        project.task['from']['dataset'],
        project.task['from']['query'],
        legacy=project.task['from'].get('legacy', True))

      # makes sure types are correct in sheet
      rows = rows_to_type(rows)

      sheets_clear(project.task['auth'], project.task['to']['sheet'], project.task['to']['tab'], project.task['to'].get('range', 'A2'))
      sheets_write(project.task['auth'], project.task['to']['sheet'], project.task['to']['tab'], project.task['to'].get('range', 'A2'), rows)

    elif 'sftp' in project.task['to']:
      rows = query_to_rows(
        project.task['auth'],
        project.id,
        project.task['from']['dataset'],
        project.task['from']['query'],
        legacy=project.task['from'].get('use_legacy_sql', True))

      if rows:
        if project.verbose:
          print("QUERY TO SFTP")
        put_rows(project.task['auth'], project.task['to'], rows)

    else:
      if project.verbose:
        print("QUERY TO VIEW", project.task['to']['view'])
      query_to_view(
        project.task['auth'],
        project.id,
        project.task['to']['dataset'],
        project.task['to']['view'],
        query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
        project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
        project.task['to'].get('replace', False))

  else:
    if project.verbose:
      print("STORAGE TO TABLE", project.task['to']['table'])
    storage_to_table(
      project.task['auth'],
      project.id,
      project.task['to']['dataset'],
      project.task['to']['table'],
      project.task['from']['bucket'] + ':' + project.task['from']['path'],
      project.task.get('schema', []),
      project.task.get('skip_rows', 1),
      project.task.get('structure', 'CSV'),
      project.task.get('disposition', 'WRITE_TRUNCATE'))
def bigquery():

  if 'function' in project.task:
    query = None

    if project.task['function'] == 'pearson_significance_test':
      query = pearson_significance_test()

    if query:
      run_query(project.task['auth'], project.id, query, False, project.task['to']['dataset'])

  elif 'run' in project.task and 'query' in project.task.get('run', {}):
    if project.verbose:
      print('QUERY', project.task['run']['query'])
    run_query(
      project.task['auth'],
      project.id,
      query_parameters(project.task['run']['query'], project.task['run'].get('parameters')),
      project.task['run'].get('legacy', True),
    )

  elif 'values' in project.task['from']:
    rows = get_rows(project.task['auth'], project.task['from'])
    rows_to_table(
      project.task['to'].get('auth', project.task['auth']),
      project.id,
      project.task['to']['dataset'],
      project.task['to']['table'],
      rows,
      project.task.get('schema', []),
      0)

  elif 'query' in project.task['from']:

    if 'table' in project.task['to']:
      if project.verbose:
        print('QUERY TO TABLE', project.task['to']['table'])
      query_to_table(
        project.task['auth'],
        project.id,
        project.task['to']['dataset'],
        project.task['to']['table'],
        query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
        disposition=project.task['write_disposition'] if 'write_disposition' in project.task else 'WRITE_TRUNCATE',
        legacy=project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
        target_project_id=project.task['to'].get('project_id', project.id))

    elif 'sheet' in project.task['to']:
      if project.verbose:
        print('QUERY TO SHEET', project.task['to']['sheet'])
      rows = query_to_rows(
        project.task['auth'],
        project.id,
        project.task['from']['dataset'],
        query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
        legacy=project.task['from'].get('legacy', True))

      # makes sure types are correct in sheet
      rows = rows_to_type(rows)

      sheets_clear(
        project.task['to'].get('auth', project.task['auth']),
        project.task['to']['sheet'],
        project.task['to']['tab'],
        project.task['to'].get('range', 'A2'))
      sheets_write(
        project.task['to'].get('auth', project.task['auth']),
        project.task['to']['sheet'],
        project.task['to']['tab'],
        project.task['to'].get('range', 'A2'),
        rows)

    elif 'sftp' in project.task['to']:
      rows = query_to_rows(
        project.task['auth'],
        project.id,
        project.task['from']['dataset'],
        query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
        legacy=project.task['from'].get('use_legacy_sql', True))

      if rows:
        if project.verbose:
          print('QUERY TO SFTP')
        put_rows(project.task['auth'], project.task['to'], rows)

    else:
      if project.verbose:
        print('QUERY TO VIEW', project.task['to']['view'])
      query_to_view(
        project.task['auth'],
        project.id,
        project.task['to']['dataset'],
        project.task['to']['view'],
        query_parameters(project.task['from']['query'], project.task['from'].get('parameters')),
        project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
        project.task['to'].get('replace', False))

  else:
    if project.verbose:
      print('STORAGE TO TABLE', project.task['to']['table'])
    storage_to_table(
      project.task['auth'],
      project.id,
      project.task['to']['dataset'],
      project.task['to']['table'],
      project.task['from']['bucket'] + ':' + project.task['from']['path'],
      project.task.get('schema', []),
      project.task.get('skip_rows', 1),
      project.task.get('structure', 'CSV'),
      project.task.get('disposition', 'WRITE_TRUNCATE'))
def sheets():
  if project.verbose:
    print('SHEETS')

  # if sheet or tab is missing, don't do anything
  if not project.task.get('sheet') or not project.task.get('tab'):
    if project.verbose:
      print('Missing Sheet and/or Tab, skipping task.')
    return

  # delete if specified, will delete sheet if no more tabs remain
  if project.task.get('delete', False):
    sheets_tab_delete(project.task['auth'], project.task['sheet'], project.task['tab'])

  # create a sheet and tab if specified, if template
  if 'template' in project.task:
    sheets_create(
      project.task['auth'],
      project.task['sheet'],
      project.task['tab'],
      project.task['template'].get('sheet'),
      project.task['template'].get('tab'),
    )

  # copy template if specified ( clear in this context means overwrite )
  #if project.task.get('template', {}).get('sheet'):
  #  sheets_tab_copy(
  #    project.task['auth'],
  #    project.task['template']['sheet'],
  #    project.task['template']['tab'],
  #    project.task['sheet'],
  #    project.task['tab'],
  #    project.task.get('clear', False)
  #  )

  # if no template at least create tab
  #else:
  #  sheets_tab_create(
  #    project.task['auth'],
  #    project.task['sheet'],
  #    project.task['tab']
  #  )

  # clear if specified
  if project.task.get('clear', False):
    sheets_clear(project.task['auth'], project.task['sheet'], project.task['tab'], project.task.get('range', 'A1'))

  # write data if specified
  if 'write' in project.task:
    rows = get_rows(project.task['auth'], project.task['write'])
    sheets_write(
      project.task['auth'],
      project.task['sheet'],
      project.task['tab'],
      project.task['range'],
      rows,
      append=False)

  # append data if specified
  if 'append' in project.task:
    rows = get_rows(project.task['auth'], project.task['append'])
    sheets_write(
      project.task['auth'],
      project.task['sheet'],
      project.task['tab'],
      project.task['range'],
      rows,
      append=True)

  # move data if specified
  if 'out' in project.task:
    rows = sheets_read(project.task['auth'], project.task['sheet'], project.task['tab'], project.task.get('range', 'A1'))

    if rows:
      schema = None

      # RECOMMENDED: define schema in json
      if project.task['out']['bigquery'].get('schema'):
        if project.verbose:
          print('SHEETS SCHEMA DEFINED')
        schema = project.task['out']['bigquery']['schema']

      # NOT RECOMMENDED: determine schema if missing
      else:
        if project.verbose:
          print('SHEETS SCHEMA DETECT ( Not Recommended - Define Schema In JSON )')
        # cast rows to types ( for schema detection )
        rows = rows_to_type(rows)
        rows, schema = get_schema(
          rows,
          project.task.get('header', False),
          infer_type=project.task.get('infer_type', True))

      # write to table ( not using put because no use cases for other destinations )
      rows_to_table(
        auth=project.task['out'].get('auth', project.task['auth']),
        project_id=project.id,
        dataset_id=project.task['out']['bigquery']['dataset'],
        table_id=project.task['out']['bigquery']['table'],
        rows=rows,
        schema=schema,
        skip_rows=1 if project.task.get('header', False) else 0,
        disposition=project.task['out']['bigquery'].get('disposition', 'WRITE_TRUNCATE'))

    else:
      print('SHEET EMPTY')
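# A hedged sketch of a sheets task for the handler above, combining 'clear'
# with an 'out' BigQuery destination, using only keys the handler reads; the
# sheet URL, tab, dataset, and table are hypothetical placeholders.
SHEETS_TASK = {
  'auth': 'user',
  'sheet': 'https://docs.google.com/spreadsheets/d/EXAMPLE_SHEET_ID/edit',
  'tab': 'Example_Tab',
  'range': 'A1',
  'header': True,
  'clear': True,
  'out': {
    'bigquery': {
      'dataset': 'Example_Dataset',
      'table': 'Example_Table'
    }
  }
}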
def create_dv360_segments(project):
  a1_notation = 'A:M'

  schema = [
    {'type': 'STRING', 'name': 'Advertiser', 'mode': 'NULLABLE'},
    {'type': 'INTEGER', 'name': 'Advertiser_Id', 'mode': 'NULLABLE'},
    {'type': 'STRING', 'name': 'Campaign', 'mode': 'NULLABLE'},
    {'type': 'INTEGER', 'name': 'Campaign_Id', 'mode': 'NULLABLE'},
    {'type': 'STRING', 'name': 'Insertion_Order', 'mode': 'NULLABLE'},
    {'type': 'INTEGER', 'name': 'Insertion_Order_Id', 'mode': 'NULLABLE'},
    {'type': 'STRING', 'name': 'Line_Item', 'mode': 'NULLABLE'},
    {'type': 'INTEGER', 'name': 'Line_Item_Id', 'mode': 'NULLABLE'},
    {'type': 'STRING', 'name': 'Line_Item_Type', 'mode': 'NULLABLE'},
    {'type': 'INTEGER', 'name': 'Impressions', 'mode': 'NULLABLE'},
    {'type': 'STRING', 'name': 'Segment1', 'mode': 'NULLABLE'},
    {'type': 'STRING', 'name': 'Segment2', 'mode': 'NULLABLE'},
    {'type': 'STRING', 'name': 'Segment3', 'mode': 'NULLABLE'}
  ]

  sheet_rows = sheets_read('user', project.task['sheet'], 'DV3 Segments', a1_notation, retries=10)
  if not sheet_rows:
    sheet_rows = []

  print('DV360 SEGMENT SHEET TABLE WRITE')

  rows_to_table(
    auth='service',
    project_id=project.id,
    dataset_id=project.task['dataset'],
    table_id=DV360_CUSTOM_SEGMENTS_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE')

  # Run query
  if project.verbose:
    print('RUN DV360 Custom Segments Query')
  run_query_from_file(Queries.dv360_custom_segments, DV360_CUSTOM_SEGMENTS_TABLE)

  # Move table back to sheets
  query = 'SELECT * FROM `' + project.id + '.' + project.task['dataset'] + '.' + DV360_CUSTOM_SEGMENTS_TABLE + '`'
  rows = query_to_rows('service', project.id, project.task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  a1_notation = a1_notation[:1] + '2' + a1_notation[1:]
  rows = rows_to_type(rows)

  sheets_clear('user', project.task['sheet'], 'DV3 Segments', a1_notation)
  sheets_write('user', project.task['sheet'], 'DV3 Segments', a1_notation, rows)
def create_dv360_segments(project):
  a1_notation = 'A:M'

  schema = [
    {"type": "STRING", "name": "Advertiser", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Advertiser_Id", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Campaign", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Campaign_Id", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Insertion_Order", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Insertion_Order_Id", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Line_Item", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Line_Item_Id", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Line_Item_Type", "mode": "NULLABLE"},
    {"type": "INTEGER", "name": "Impressions", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Segment1", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Segment2", "mode": "NULLABLE"},
    {"type": "STRING", "name": "Segment3", "mode": "NULLABLE"}
  ]

  sheet_rows = sheets_read('user', project.task['sheet'], 'DV3 Segments', a1_notation, retries=10)
  if not sheet_rows:
    sheet_rows = []

  print('DV360 SEGMENT SHEET TABLE WRITE')

  rows_to_table(
    auth='service',
    project_id=project.id,
    dataset_id=project.task['dataset'],
    table_id=DV360_CUSTOM_SEGMENTS_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE')

  # Run query
  path = os.path.join(os.path.dirname(__file__), SQL_DIRECTORY + DV360_CUSTOM_SEGMENTS_FILENAME)
  query = ''
  with open(path, 'r') as file:
    data = file.read().replace('\n', ' ')
    query = data.replace("{{project_id}}", project.id).replace("{{dataset}}", project.task['dataset'])

  print('DV360 CUSTOM SEGMENT TABLE')
  query_to_table('service', project.id, project.task['dataset'], DV360_CUSTOM_SEGMENTS_TABLE, query, legacy=False)

  # Move table back to sheets
  query = 'SELECT * FROM `' + project.id + '.' + project.task['dataset'] + '.' + DV360_CUSTOM_SEGMENTS_TABLE + '`'
  rows = query_to_rows('service', project.id, project.task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  a1_notation = a1_notation[:1] + '2' + a1_notation[1:]
  rows = rows_to_type(rows)

  sheets_clear('user', project.task['sheet'], 'DV3 Segments', a1_notation)
  sheets_write('user', project.task['sheet'], 'DV3 Segments', a1_notation, rows)