def update(self):
  """Updates the related Bulkdozer feed item with the values in this object."""
  new_feed = self._dict_to_feed(parse=self._parse)

  if len(new_feed) > 1:
    sheets_write(self.config, self.auth, self.trix_id, self.tab_name,
                 self.trix_range, new_feed)
def create_cm_site_segmentation(config, task):
  # Read sheet to bq table
  sheet_rows = sheets_read(config, task['auth_sheets'], task['sheet'],
                           'CM_Site_Segments', 'A:C', retries=10)
  if not sheet_rows:
    sheet_rows = []

  schema = [
    { "type": "STRING", "name": "Site_Dcm", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Impressions", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Site_Type", "mode": "NULLABLE" }
  ]

  rows_to_table(
    config,
    auth=task['auth_bq'],
    project_id=config.project,
    dataset_id=task['dataset'],
    table_id=CM_SITE_SEGMENTATION_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE'
  )

  # Get Site_Type from the sheet
  run_query_from_file(config, task,
                      Queries.cm_site_segmentation.replace('{{dataset}}', task['dataset']),
                      CM_SITE_SEGMENTATION_TABLE)

  # Move Table back to sheets
  query = 'SELECT * from `' + config.project + '.' + task['dataset'] + '.' + CM_SITE_SEGMENTATION_TABLE + '`'
  rows = query_to_rows(config, task['auth_bq'], config.project,
                       task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  rows = rows_to_type(rows)

  sheets_clear(config, task['auth_sheets'], task['sheet'], 'CM_Site_Segments', 'A2:C')
  sheets_write(config, task['auth_sheets'], task['sheet'], 'CM_Site_Segments', 'A2:C', rows)
def flush(self):
  """Flushes the message buffer writing buffered messages to the sheet."""
  if self._buffer:
    sheets_write(self.auth, self.trix_id, 'Log', 'A' + str(self._row),
                 self._buffer)
    self._row += len(self._buffer)
    self._buffer = []
def create_cm_site_segmentation(project):
  # Read sheet to bq table
  sheet_rows = sheets_read('user', project.task['sheet'], 'CM_Site_Segments',
                           'A:C', retries=10)
  if not sheet_rows:
    sheet_rows = []

  schema = [
    { "type": "STRING", "name": "Site_Dcm", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Impressions", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Site_Type", "mode": "NULLABLE" }
  ]

  rows_to_table(
    auth='service',
    project_id=project.id,
    dataset_id=project.task['dataset'],
    table_id=CM_SITE_SEGMENTATION_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE')

  # Get Site_Type from the sheet
  run_query_from_file(
    os.path.join(os.path.dirname(__file__),
                 SQL_DIRECTORY + CM_SITE_SEGMENTATION_FILENAME),
    project.id, project.task['dataset'], CM_SITE_SEGMENTATION_TABLE)

  # Move Table back to sheets
  query = 'SELECT * from `' + project.id + '.' + project.task['dataset'] + '.' + CM_SITE_SEGMENTATION_TABLE + '`'
  rows = query_to_rows('service', project.id, project.task['dataset'],
                       query, legacy=False)

  # makes sure types are correct in sheet
  rows = rows_to_type(rows)

  sheets_clear('user', project.task['sheet'], 'CM_Site_Segments', 'A2:C')
  sheets_write('user', project.task['sheet'], 'CM_Site_Segments', 'A2:C', rows)
def create_dv360_segments(config, task):
  a1_notation = 'A:N'

  schema = [
    { "type": "STRING", "name": "Advertiser", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Advertiser_Id", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Campaign", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Campaign_Id", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Insertion_Order", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Insertion_Order_Id", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Line_Item", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Line_Item_Id", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Line_Item_Type", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Impressions", "mode": "NULLABLE" },
    { "type": "STRING", "name": "SegmentAutoGen", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Segment1", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Segment2", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Segment3", "mode": "NULLABLE" }
  ]

  sheet_rows = sheets_read(config, task['auth_sheets'], task['sheet'],
                           'DV3 Segments', a1_notation, retries=10)
  if not sheet_rows:
    sheet_rows = []

  print('DV360 SEGMENT SHEET TABLE WRITE')

  rows_to_table(
    config,
    auth=task['auth_bq'],
    project_id=config.project,
    dataset_id=task['dataset'],
    table_id=DV360_CUSTOM_SEGMENTS_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE'
  )

  # Run Query
  if config.verbose:
    print('RUN DV360 Custom Segments Query')
  run_query_from_file(config, task,
                      Queries.dv360_custom_segments.replace('{{dataset}}', task['dataset']),
                      DV360_CUSTOM_SEGMENTS_TABLE)

  # Move Table back to sheets
  query = 'SELECT * from `' + config.project + '.' + task['dataset'] + '.' + DV360_CUSTOM_SEGMENTS_TABLE + '`'
  rows = query_to_rows(config, task['auth_bq'], config.project,
                       task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  a1_notation = a1_notation[:1] + '2' + a1_notation[1:]
  rows = rows_to_type(rows)

  sheets_clear(config, task['auth_sheets'], task['sheet'], 'DV3 Segments', a1_notation)
  sheets_write(config, task['auth_sheets'], task['sheet'], 'DV3 Segments', a1_notation, rows)
def flush(self):
  """Flushes the message buffer writing buffered messages to the sheet."""
  if self._buffer:
    sheets_write(self.config, self.auth, self.trix_id, 'Log', 'A1',
                 self._buffer, append=True)
    self._row += len(self._buffer)
    self._buffer = []
def save_id_map(self):
  """Saves the ID map into the Bulkdozer feed."""
  if self.trix_id:
    columns = [
      'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
      'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'W', 'X', 'Y', 'Z'
    ]

    data = json.dumps(self._id_map)

    # split the serialized map into chunks that fit within the 50,000
    # character limit of a single sheet cell
    data = [
      data[start:start + 49999] for start in range(0, len(data), 49999)
    ]

    sheets_write(self.auth, self.trix_id, 'Store',
                 'A:' + columns[len(data) + 1], [data])
def create_cm_site_segmentation(project):
  # Read sheet to bq table
  sheet_rows = sheets_read(
    'user', project.task['sheet'], 'CM_Site_Segments', 'A:C', retries=10)
  if not sheet_rows:
    sheet_rows = []

  schema = [
    { 'type': 'STRING', 'name': 'Site_Dcm', 'mode': 'NULLABLE' },
    { 'type': 'INTEGER', 'name': 'Impressions', 'mode': 'NULLABLE' },
    { 'type': 'STRING', 'name': 'Site_Type', 'mode': 'NULLABLE' }
  ]

  rows_to_table(
    auth='service',
    project_id=project.id,
    dataset_id=project.task['dataset'],
    table_id=CM_SITE_SEGMENTATION_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE')

  # Get Site_Type from the sheet
  run_query_from_file(Queries.cm_site_segmentation, CM_SITE_SEGMENTATION_TABLE)

  # Move Table back to sheets
  query = 'SELECT * from `' + project.id + '.' + project.task['dataset'] + '.' + CM_SITE_SEGMENTATION_TABLE + '`'
  rows = query_to_rows(
    'service', project.id, project.task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  rows = rows_to_type(rows)

  sheets_clear('user', project.task['sheet'], 'CM_Site_Segments', 'A2:C')
  sheets_write('user', project.task['sheet'], 'CM_Site_Segments', 'A2:C', rows)
def put_rows(auth, destination, rows, variant=''):
  """Processes standard write JSON block for dynamic export of data.

  Allows us to quickly write the results of a script to a destination. For
  example, write the results of a DCM report into BigQuery.

  - Will write to multiple destinations if specified.
  - Extensible, add a handler to define a new destination ( be kind, update the documentation json ).

  Include the following JSON in a recipe, then in the run.py handler when
  encountering that block pass it to this function.

    from utils.data import put_rows

    var_json = {
      "out":{
        "bigquery":{
          "dataset": [ string ],
          "table": [ string ],
          "schema": [ json - standard bigquery schema json ],
          "skip_rows": [ integer - for removing header ],
          "disposition": [ string - same as BigQuery documentation ]
        },
        "sheets":{
          "sheet":[ string - full URL, suggest using share link ],
          "tab":[ string ],
          "range":[ string - A1:A notation ],
          "delete": [ boolean - if sheet range should be cleared before writing ]
        },
        "storage":{
          "bucket": [ string ],
          "path": [ string ]
        },
        "file":[ string - full path to place to write file ]
      }
    }

    put_rows('user', var_json, rows)

  Or you can use it directly with project singleton.

    from util.project import project
    from utils.data import put_rows

    @project.from_parameters
    def something():
      put_rows(project.task['auth'], project.task['out'], rows)

    if __name__ == "__main__":
      something()

  Args:
    auth: (string) The type of authentication to use, user or service.
    destination: (json) A json block resembling var_json described above.
    rows: (list) The data being written as a list object.
    variant: (string) Appended to destination to differentiate multiple objects.

  Returns:
    Nothing, writes rows to the destinations specified in the JSON block.
  """

  if 'bigquery' in destination:

    if destination['bigquery'].get('format', 'CSV') == 'JSON':
      json_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        destination['bigquery'].get('schema', []),
        destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

    elif destination['bigquery'].get('is_incremental_load', False) == True:
      incremental_rows_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        destination['bigquery'].get('schema', []),
        destination['bigquery'].get('skip_rows', 1),  # 0 if 'schema' in destination['bigquery'] else 1
        destination['bigquery'].get('disposition', 'WRITE_APPEND'),
        billing_project_id=project.id)

    else:
      rows_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        destination['bigquery'].get('schema', []),
        destination['bigquery'].get('skip_rows', 1),  # 0 if 'schema' in destination['bigquery'] else 1
        destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

  if 'sheets' in destination:
    if destination['sheets'].get('delete', False):
      sheets_clear(
        auth,
        destination['sheets']['sheet'],
        destination['sheets']['tab'] + variant,
        destination['sheets']['range'],
      )
    sheets_write(auth, destination['sheets']['sheet'],
                 destination['sheets']['tab'] + variant,
                 destination['sheets']['range'], rows)

  if 'file' in destination:
    path_out, file_ext = destination['file'].rsplit('.', 1)
    file_out = path_out + variant + '.' + file_ext
    if project.verbose:
      print('SAVING', file_out)
    makedirs_safe(parse_path(file_out))
    with open(file_out, 'w') as save_file:
      save_file.write(rows_to_csv(rows).read())

  if 'storage' in destination and destination['storage'].get(
      'bucket') and destination['storage'].get('path'):
    # create the bucket
    bucket_create(auth, project.id, destination['storage']['bucket'])

    # put the file
    file_out = destination['storage']['bucket'] + ':' + destination['storage']['path'] + variant
    if project.verbose:
      print('SAVING', file_out)
    object_put(auth, file_out, rows_to_csv(rows))

  if 'sftp' in destination:
    try:
      cnopts = pysftp.CnOpts()
      cnopts.hostkeys = None

      path_out, file_out = destination['sftp']['file'].rsplit('.', 1)
      file_out = path_out + variant + '.' + file_out

      sftp = pysftp.Connection(
        host=destination['sftp']['host'],
        username=destination['sftp']['username'],
        password=destination['sftp']['password'],
        port=destination['sftp']['port'],
        cnopts=cnopts)

      if '/' in file_out:
        dir_out, file_out = file_out.rsplit('/', 1)
        sftp.cwd(dir_out)

      sftp.putfo(rows_to_csv(rows), file_out)

    except Exception as e:
      print(str(e))
      traceback.print_exc()
def bigquery_query():
  """Executes a query and writes the results to a table, sheet, sftp, or view.

  TODO: Replace with get_rows and put_rows combination.

  See:
    scripts/bigquery_query.json
    scripts/bigquery_storage.json
    scripts/bigquery_to_sheet.json
    scripts/bigquery_view.json
  """

  if 'table' in project.task['to']:
    if project.verbose:
      print('QUERY TO TABLE', project.task['to']['table'])

    query_to_table(
      project.task['auth'],
      project.id,
      project.task['to']['dataset'],
      project.task['to']['table'],
      query_parameters(project.task['from']['query'],
                       project.task['from'].get('parameters')),
      disposition=project.task['write_disposition'] if 'write_disposition' in project.task else 'WRITE_TRUNCATE',
      legacy=project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
      target_project_id=project.task['to'].get('project_id', project.id))

  elif 'sheet' in project.task['to']:
    if project.verbose:
      print('QUERY TO SHEET', project.task['to']['sheet'])

    rows = query_to_rows(project.task['auth'], project.id,
                         project.task['from']['dataset'],
                         query_parameters(project.task['from']['query'],
                                          project.task['from'].get('parameters')),
                         legacy=project.task['from'].get('legacy', True))

    # makes sure types are correct in sheet
    rows = rows_to_type(rows)

    sheets_clear(project.task['to'].get('auth', project.task['auth']),
                 project.task['to']['sheet'], project.task['to']['tab'],
                 project.task['to'].get('range', 'A2'))
    sheets_write(project.task['to'].get('auth', project.task['auth']),
                 project.task['to']['sheet'], project.task['to']['tab'],
                 project.task['to'].get('range', 'A2'), rows)

  elif 'sftp' in project.task['to']:
    if project.verbose:
      print('QUERY TO SFTP')

    rows = query_to_rows(
      project.task['auth'],
      project.id,
      project.task['from']['dataset'],
      query_parameters(project.task['from']['query'],
                       project.task['from'].get('parameters')),
      legacy=project.task['from'].get('use_legacy_sql', True))

    if rows:
      put_rows(project.task['auth'], project.task['to'], rows)

  else:
    if project.verbose:
      print('QUERY TO VIEW', project.task['to']['view'])

    query_to_view(
      project.task['auth'],
      project.id,
      project.task['to']['dataset'],
      project.task['to']['view'],
      query_parameters(project.task['from']['query'],
                       project.task['from'].get('parameters')),
      project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
      project.task['to'].get('replace', False))
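For orientation, the sketch below shows the shape of a recipe task that would send bigquery_query() down its sheet branch. It is an illustrative guess assembled only from the keys read above; the dataset, query, sheet URL, and tab are placeholder values, not anything taken from the source.

  # Hypothetical project.task block for bigquery_query() ( illustrative placeholders only ).
  example_task = {
    'auth': 'user',
    'from': {
      'dataset': 'Example_Dataset',                                # assumed dataset name
      'query': 'SELECT * FROM `Example_Dataset.Example_Table`',    # assumed query
      'legacy': False
    },
    'to': {
      'auth': 'user',
      'sheet': 'https://docs.google.com/spreadsheets/d/.../edit',  # placeholder share link
      'tab': 'Example_Tab',
      'range': 'A2'
    }
  }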
def sheets():
  if project.verbose:
    print('SHEETS')

  # clear if specified
  if project.task.get('clear', False):
    sheets_clear(project.task['auth'], project.task['sheet'],
                 project.task['tab'], project.task['range'])

  # delete if specified ( after clear to prevent errors in case both are given )
  if project.task.get('delete', False):
    sheets_tab_delete(project.task['auth'], project.task['sheet'],
                      project.task['tab'])

  # create or copy if specified
  if 'template' in project.task:
    sheets_tab_copy(project.task['auth'],
                    project.task['template']['sheet'],
                    project.task['template']['tab'],
                    project.task['sheet'], project.task['tab'])
  else:
    sheets_tab_create(project.task['auth'], project.task['sheet'],
                      project.task['tab'])

  # write data if specified
  if 'write' in project.task:
    rows = get_rows(project.task['auth'], project.task['write'])
    sheets_write(project.task['auth'], project.task['sheet'],
                 project.task['tab'], project.task['range'], rows)

  # move if specified
  if 'out' in project.task:
    rows = sheets_read(project.task['auth'], project.task['sheet'],
                       project.task['tab'], project.task['range'])

    if rows:
      schema = None

      # RECOMMENDED: define schema in json
      if project.task['out']['bigquery'].get('schema'):
        if project.verbose:
          print('SHEETS SCHEMA DEFINED')
        schema = project.task['out']['bigquery']['schema']

      # NOT RECOMMENDED: determine schema if missing
      else:
        if project.verbose:
          print('SHEETS SCHEMA DETECT ( Not Recommended - Define Schema In JSON )')
        # cast rows to types ( for schema detection )
        rows = rows_to_type(rows)
        rows, schema = get_schema(rows, project.task.get('header', False),
                                  infer_type=project.task.get('infer_type', True))

      # write to table ( not using put because no use cases for other destinations )
      rows_to_table(
        auth=project.task['out'].get('auth', project.task['auth']),
        project_id=project.id,
        dataset_id=project.task['out']['bigquery']['dataset'],
        table_id=project.task['out']['bigquery']['table'],
        rows=rows,
        schema=schema,
        skip_rows=1 if project.task.get('header', False) else 0,
        disposition=project.task['out']['bigquery'].get('disposition', 'WRITE_TRUNCATE'))

    else:
      print('SHEET EMPTY')
def put_rows(auth, destination, filename, rows, variant=''):
  """Processes standard write JSON block for dynamic export of data.

  Allows us to quickly write the results of a script to a destination. For
  example, write the results of a DCM report into BigQuery.

  - Will write to multiple destinations if specified.
  - Extensible, add a handler to define a new destination ( be kind, update the documentation json ).

  Include the following JSON in a recipe, then in the run.py handler when
  encountering that block pass it to this function.

    from utils.data import put_rows

    var_json = {
      "out":{
        "bigquery":{
          "dataset": [ string ],
          "table": [ string ],
          "schema": [ json - standard bigquery schema json ],
          "skip_rows": [ integer - for removing header ],
          "disposition": [ string - same as BigQuery documentation ]
        },
        "sheets":{
          "sheet":[ string - full URL, suggest using share link ],
          "tab":[ string ],
          "range":[ string - A1:A notation ],
          "delete": [ boolean - if sheet range should be cleared before writing ]
        },
        "storage":{
          "bucket": [ string ],
          "path": [ string ]
        },
        "directory":[ string - full path to place to write file ]
      }
    }

    put_rows('user', var_json, filename, rows)

  Or you can use it directly with project singleton.

    from util.project import project
    from utils.data import put_rows

    @project.from_parameters
    def something():
      put_rows(project.task['auth'], project.task['out'], filename, rows)

    if __name__ == "__main__":
      something()

  Args:
    auth: (string) The type of authentication to use, user or service.
    destination: (json) A json block resembling var_json described above.
    filename: (string) A unique filename if writing to a medium requiring one, usually generated by the script.
    rows: (list) The data being written as a list object.
    variant: (string) Appends this to the destination name to create a variant ( for example when downloading multiple tabs in a sheet ).

  Returns:
    Nothing, writes rows to the destinations specified in the JSON block.
  """

  if 'bigquery' in destination:

    if destination['bigquery'].get('format', 'CSV') == 'JSON':
      json_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        destination['bigquery'].get('schema', []),
        destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

    elif destination['bigquery'].get('is_incremental_load', False) == True:
      incremental_rows_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        destination['bigquery'].get('schema', []),
        destination['bigquery'].get('skip_rows', 1),  # 0 if 'schema' in destination['bigquery'] else 1
        destination['bigquery'].get('disposition', 'WRITE_APPEND'),
        billing_project_id=project.id)

    else:
      rows_to_table(
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', project.id),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        destination['bigquery'].get('schema', []),
        destination['bigquery'].get('skip_rows', 1),  # 0 if 'schema' in destination['bigquery'] else 1
        destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

  if 'sheets' in destination:
    if destination['sheets'].get('delete', False):
      sheets_clear(auth, destination['sheets']['sheet'],
                   destination['sheets']['tab'] + variant,
                   destination['sheets']['range'])
    sheets_write(auth, destination['sheets']['sheet'],
                 destination['sheets']['tab'] + variant,
                 destination['sheets']['range'], rows)

  if 'directory' in destination:
    file_out = destination['directory'] + variant + filename
    if project.verbose:
      print('SAVING', file_out)
    makedirs_safe(parse_path(file_out))
    with open(file_out, 'w') as save_file:
      save_file.write(rows_to_csv(rows).read())

  if 'storage' in destination and destination['storage'].get(
      'bucket') and destination['storage'].get('path'):
    # create the bucket
    bucket_create(auth, project.id, destination['storage']['bucket'])

    # put the file
    file_out = destination['storage']['bucket'] + ':' + destination['storage']['path'] + variant + filename
    if project.verbose:
      print('SAVING', file_out)
    object_put(auth, file_out, rows_to_csv(rows))

  # deprecated do not use
  if 'trix' in destination:
    trix_update(auth, destination['trix']['sheet_id'],
                destination['trix']['sheet_range'], rows_to_csv(rows),
                destination['trix']['clear'])

  if 'email' in destination:
    pass

  if 'sftp' in destination:
    try:
      sys.stderr = StringIO()

      cnopts = pysftp.CnOpts()
      cnopts.hostkeys = None

      file_prefix = 'report'
      if 'file_prefix' in destination['sftp']:
        file_prefix = destination['sftp'].get('file_prefix')
        del destination['sftp']['file_prefix']

      #sftp_configs = destination['sftp']
      #sftp_configs['cnopts'] = cnopts
      #sftp = pysftp.Connection(**sftp_configs)

      sftp = pysftp.Connection(
        host=destination['sftp']['host'],
        username=destination['sftp']['username'],
        password=destination['sftp']['password'],
        port=destination['sftp']['port'],
        cnopts=cnopts)

      if 'directory' in destination['sftp']:
        sftp.cwd(destination['sftp']['directory'])

      tmp_file_name = '/tmp/%s_%s.csv' % (
        file_prefix, datetime.datetime.now().strftime('%Y-%m-%dT%H-%M-%S'))

      tmp_file = open(tmp_file_name, 'w')
      tmp_file.write(rows_to_csv(rows).read())
      tmp_file.close()

      sftp.put(tmp_file_name)

      os.remove(tmp_file_name)

      sys.stderr = sys.__stderr__

    except Exception as e:
      print(str(e))
      traceback.print_exc()
def update(self):
  """Writes configurations back to the Bulkdozer feed."""
  sheets_write(self.auth, self.trix_id, 'Store', 'B3', [[self.mode]])
def bigquery():

  if 'run' in project.task and 'query' in project.task.get('run', {}):
    if project.verbose:
      print("QUERY", project.task['run']['query'])
    run_query(
      project.task['auth'],
      project.id,
      project.task['run']['query'],
      project.task['run'].get('legacy', True),
      #project.task['run'].get('billing_project_id', None)
    )

  elif 'values' in project.task['from']:
    rows = get_rows(project.task['auth'], project.task['from'])
    rows_to_table(
      project.task['to'].get('auth', project.task['auth']),
      project.id,
      project.task['to']['dataset'],
      project.task['to']['table'],
      rows,
      project.task.get('schema', []),
      0)

  elif 'query' in project.task['from']:

    if 'table' in project.task['to']:
      if project.verbose:
        print("QUERY TO TABLE", project.task['to']['table'])

      if 'pre_process_query' in project.task['to']:
        print('executing statement')
        execute_statement(
          project.task['auth'],
          project.id,
          project.task['to']['dataset'],
          project.task['to']['pre_process_query'],
          use_legacy_sql=project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)))

      query_to_table(
        project.task['auth'],
        project.id,
        project.task['to']['dataset'],
        project.task['to']['table'],
        query_parameters(project.task['from']['query'],
                         project.task['from'].get('parameters')),
        disposition=project.task['write_disposition'] if 'write_disposition' in project.task else 'WRITE_TRUNCATE',
        legacy=project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
        target_project_id=project.task['to'].get('project_id', project.id))

    # NOT USED SO RIPPING IT OUT
    # Mauriciod: Yes, it is used, look at project/mauriciod/target_winrate.json
    elif 'storage' in project.task['to']:
      if project.verbose:
        print("QUERY TO STORAGE", project.task['to']['storage'])

      local_file_name = '/tmp/%s' % str(uuid.uuid1())
      rows = query_to_rows(project.task['auth'], project.id,
                           project.task['from']['dataset'],
                           project.task['from']['query'])

      f = open(local_file_name, 'wb')
      writer = csv.writer(f)
      writer.writerows(rows)
      f.close()

      f = open(local_file_name, 'rb')
      object_put(project.task['auth'], project.task['to']['storage'], f)
      f.close()

      os.remove(local_file_name)

    elif 'sheet' in project.task['to']:
      if project.verbose:
        print("QUERY TO SHEET", project.task['to']['sheet'])

      rows = query_to_rows(project.task['auth'], project.id,
                           project.task['from']['dataset'],
                           project.task['from']['query'],
                           legacy=project.task['from'].get('legacy', True))

      # makes sure types are correct in sheet
      rows = rows_to_type(rows)

      sheets_clear(project.task['auth'], project.task['to']['sheet'],
                   project.task['to']['tab'],
                   project.task['to'].get('range', 'A2'))
      sheets_write(project.task['auth'], project.task['to']['sheet'],
                   project.task['to']['tab'],
                   project.task['to'].get('range', 'A2'), rows)

    elif 'sftp' in project.task['to']:
      rows = query_to_rows(project.task['auth'], project.id,
                           project.task['from']['dataset'],
                           project.task['from']['query'],
                           legacy=project.task['from'].get('use_legacy_sql', True))

      if rows:
        if project.verbose:
          print("QUERY TO SFTP")
        put_rows(project.task['auth'], project.task['to'], rows)

    else:
      if project.verbose:
        print("QUERY TO VIEW", project.task['to']['view'])

      query_to_view(
        project.task['auth'],
        project.id,
        project.task['to']['dataset'],
        project.task['to']['view'],
        query_parameters(project.task['from']['query'],
                         project.task['from'].get('parameters')),
        project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
        project.task['to'].get('replace', False))

  else:
    if project.verbose:
      print("STORAGE TO TABLE", project.task['to']['table'])

    storage_to_table(
      project.task['auth'], project.id, project.task['to']['dataset'],
      project.task['to']['table'],
      project.task['from']['bucket'] + ':' + project.task['from']['path'],
      project.task.get('schema', []), project.task.get('skip_rows', 1),
      project.task.get('structure', 'CSV'),
      project.task.get('disposition', 'WRITE_TRUNCATE'))
def sheets():
  if project.verbose:
    print('SHEETS')

  # if sheet or tab is missing, don't do anything
  if not project.task.get('sheet') or not project.task.get('tab'):
    if project.verbose:
      print('Missing Sheet and/or Tab, skipping task.')
    return

  # delete if specified, will delete sheet if no more tabs remain
  if project.task.get('delete', False):
    sheets_tab_delete(project.task['auth'], project.task['sheet'],
                      project.task['tab'])

  # create a sheet and tab if specified, if template
  if 'template' in project.task:
    sheets_create(
      project.task['auth'],
      project.task['sheet'],
      project.task['tab'],
      project.task['template'].get('sheet'),
      project.task['template'].get('tab'),
    )

  # copy template if specified ( clear in this context means overwrite )
  #if project.task.get('template', {}).get('sheet'):
  #  sheets_tab_copy(
  #    project.task['auth'],
  #    project.task['template']['sheet'],
  #    project.task['template']['tab'],
  #    project.task['sheet'],
  #    project.task['tab'],
  #    project.task.get('clear', False)
  #  )

  # if no template at least create tab
  #else:
  #  sheets_tab_create(
  #    project.task['auth'],
  #    project.task['sheet'],
  #    project.task['tab']
  #  )

  # clear if specified
  if project.task.get('clear', False):
    sheets_clear(project.task['auth'], project.task['sheet'],
                 project.task['tab'], project.task.get('range', 'A1'))

  # write data if specified
  if 'write' in project.task:
    rows = get_rows(project.task['auth'], project.task['write'])
    sheets_write(project.task['auth'], project.task['sheet'],
                 project.task['tab'], project.task['range'], rows,
                 append=False)

  # append data if specified
  if 'append' in project.task:
    rows = get_rows(project.task['auth'], project.task['append'])
    sheets_write(project.task['auth'], project.task['sheet'],
                 project.task['tab'], project.task['range'], rows,
                 append=True)

  # move data if specified
  if 'out' in project.task:
    rows = sheets_read(project.task['auth'], project.task['sheet'],
                       project.task['tab'], project.task.get('range', 'A1'))

    if rows:
      schema = None

      # RECOMMENDED: define schema in json
      if project.task['out']['bigquery'].get('schema'):
        if project.verbose:
          print('SHEETS SCHEMA DEFINED')
        schema = project.task['out']['bigquery']['schema']

      # NOT RECOMMENDED: determine schema if missing
      else:
        if project.verbose:
          print('SHEETS SCHEMA DETECT ( Not Recommended - Define Schema In JSON )')
        # cast rows to types ( for schema detection )
        rows = rows_to_type(rows)
        rows, schema = get_schema(rows, project.task.get('header', False),
                                  infer_type=project.task.get('infer_type', True))

      # write to table ( not using put because no use cases for other destinations )
      rows_to_table(
        auth=project.task['out'].get('auth', project.task['auth']),
        project_id=project.id,
        dataset_id=project.task['out']['bigquery']['dataset'],
        table_id=project.task['out']['bigquery']['table'],
        rows=rows,
        schema=schema,
        skip_rows=1 if project.task.get('header', False) else 0,
        disposition=project.task['out']['bigquery'].get('disposition', 'WRITE_TRUNCATE'))

    else:
      print('SHEET EMPTY')
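As a reading aid, here is a hedged sketch of a task block that would exercise the template, clear, write, and out branches of this sheets() handler. Every key mirrors a project.task lookup above; all concrete values are made-up placeholders, and the 'write' block assumes get_rows() accepts an inline 'values' list, as suggested by the bigquery() handlers elsewhere in this section.

  # Hypothetical project.task block for sheets() ( illustrative placeholders only ).
  example_task = {
    'auth': 'user',
    'sheet': 'https://docs.google.com/spreadsheets/d/.../edit',    # placeholder share link
    'tab': 'Example_Tab',
    'range': 'A1',
    'template': {
      'sheet': 'https://docs.google.com/spreadsheets/d/.../edit',  # placeholder template sheet
      'tab': 'Template_Tab'
    },
    'clear': True,
    'header': True,
    'write': { 'values': [['Column_A', 'Column_B'], ['example', 1]] },
    'out': {
      'bigquery': {
        'dataset': 'Example_Dataset',
        'table': 'Example_Table'
      }
    }
  }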
def bigquery():

  if 'function' in project.task:
    query = None

    if project.task['function'] == 'pearson_significance_test':
      query = pearson_significance_test()

    if query:
      run_query(project.task['auth'], project.id, query, False,
                project.task['to']['dataset'])

  elif 'run' in project.task and 'query' in project.task.get('run', {}):
    if project.verbose:
      print('QUERY', project.task['run']['query'])
    run_query(
      project.task['auth'],
      project.id,
      query_parameters(project.task['run']['query'],
                       project.task['run'].get('parameters')),
      project.task['run'].get('legacy', True),
    )

  elif 'values' in project.task['from']:
    rows = get_rows(project.task['auth'], project.task['from'])
    rows_to_table(
      project.task['to'].get('auth', project.task['auth']),
      project.id,
      project.task['to']['dataset'],
      project.task['to']['table'],
      rows,
      project.task.get('schema', []),
      0)

  elif 'query' in project.task['from']:

    if 'table' in project.task['to']:
      if project.verbose:
        print('QUERY TO TABLE', project.task['to']['table'])

      query_to_table(
        project.task['auth'],
        project.id,
        project.task['to']['dataset'],
        project.task['to']['table'],
        query_parameters(project.task['from']['query'],
                         project.task['from'].get('parameters')),
        disposition=project.task['write_disposition'] if 'write_disposition' in project.task else 'WRITE_TRUNCATE',
        legacy=project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
        target_project_id=project.task['to'].get('project_id', project.id))

    elif 'sheet' in project.task['to']:
      if project.verbose:
        print('QUERY TO SHEET', project.task['to']['sheet'])

      rows = query_to_rows(
        project.task['auth'],
        project.id,
        project.task['from']['dataset'],
        query_parameters(project.task['from']['query'],
                         project.task['from'].get('parameters')),
        legacy=project.task['from'].get('legacy', True))

      # makes sure types are correct in sheet
      rows = rows_to_type(rows)

      sheets_clear(project.task['to'].get('auth', project.task['auth']),
                   project.task['to']['sheet'], project.task['to']['tab'],
                   project.task['to'].get('range', 'A2'))
      sheets_write(project.task['to'].get('auth', project.task['auth']),
                   project.task['to']['sheet'], project.task['to']['tab'],
                   project.task['to'].get('range', 'A2'), rows)

    elif 'sftp' in project.task['to']:
      rows = query_to_rows(
        project.task['auth'],
        project.id,
        project.task['from']['dataset'],
        query_parameters(project.task['from']['query'],
                         project.task['from'].get('parameters')),
        legacy=project.task['from'].get('use_legacy_sql', True))

      if rows:
        if project.verbose:
          print('QUERY TO SFTP')
        put_rows(project.task['auth'], project.task['to'], rows)

    else:
      if project.verbose:
        print('QUERY TO VIEW', project.task['to']['view'])

      query_to_view(
        project.task['auth'],
        project.id,
        project.task['to']['dataset'],
        project.task['to']['view'],
        query_parameters(project.task['from']['query'],
                         project.task['from'].get('parameters')),
        project.task['from'].get('legacy', project.task['from'].get('useLegacySql', True)),  # DEPRECATED: useLegacySql
        project.task['to'].get('replace', False))

  else:
    if project.verbose:
      print('STORAGE TO TABLE', project.task['to']['table'])

    storage_to_table(
      project.task['auth'], project.id, project.task['to']['dataset'],
      project.task['to']['table'],
      project.task['from']['bucket'] + ':' + project.task['from']['path'],
      project.task.get('schema', []), project.task.get('skip_rows', 1),
      project.task.get('structure', 'CSV'),
      project.task.get('disposition', 'WRITE_TRUNCATE'))
def create_dv360_segments(project):
  a1_notation = 'A:M'

  schema = [
    { "type": "STRING", "name": "Advertiser", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Advertiser_Id", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Campaign", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Campaign_Id", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Insertion_Order", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Insertion_Order_Id", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Line_Item", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Line_Item_Id", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Line_Item_Type", "mode": "NULLABLE" },
    { "type": "INTEGER", "name": "Impressions", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Segment1", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Segment2", "mode": "NULLABLE" },
    { "type": "STRING", "name": "Segment3", "mode": "NULLABLE" }
  ]

  sheet_rows = sheets_read('user', project.task['sheet'], 'DV3 Segments',
                           a1_notation, retries=10)
  if not sheet_rows:
    sheet_rows = []

  print('DV360 SEGMENT SHEET TABLE WRITE')

  rows_to_table(
    auth='service',
    project_id=project.id,
    dataset_id=project.task['dataset'],
    table_id=DV360_CUSTOM_SEGMENTS_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE')

  # Run Query
  path = os.path.join(os.path.dirname(__file__),
                      SQL_DIRECTORY + DV360_CUSTOM_SEGMENTS_FILENAME)

  query = ''
  with open(path, 'r') as file:
    data = file.read().replace('\n', ' ')
    query = data.replace("{{project_id}}", project.id).replace("{{dataset}}", project.task['dataset'])

  print('DV360 CUSTOM SEGMENT TABLE')

  query_to_table('service', project.id, project.task['dataset'],
                 DV360_CUSTOM_SEGMENTS_TABLE, query, legacy=False)

  # Move Table back to sheets
  query = 'SELECT * from `' + project.id + '.' + project.task['dataset'] + '.' + DV360_CUSTOM_SEGMENTS_TABLE + '`'
  rows = query_to_rows('service', project.id, project.task['dataset'],
                       query, legacy=False)

  # makes sure types are correct in sheet
  a1_notation = a1_notation[:1] + '2' + a1_notation[1:]
  rows = rows_to_type(rows)

  sheets_clear('user', project.task['sheet'], 'DV3 Segments', a1_notation)
  sheets_write('user', project.task['sheet'], 'DV3 Segments', a1_notation, rows)
def create_dv360_segments(project):
  a1_notation = 'A:M'

  schema = [
    { 'type': 'STRING', 'name': 'Advertiser', 'mode': 'NULLABLE' },
    { 'type': 'INTEGER', 'name': 'Advertiser_Id', 'mode': 'NULLABLE' },
    { 'type': 'STRING', 'name': 'Campaign', 'mode': 'NULLABLE' },
    { 'type': 'INTEGER', 'name': 'Campaign_Id', 'mode': 'NULLABLE' },
    { 'type': 'STRING', 'name': 'Insertion_Order', 'mode': 'NULLABLE' },
    { 'type': 'INTEGER', 'name': 'Insertion_Order_Id', 'mode': 'NULLABLE' },
    { 'type': 'STRING', 'name': 'Line_Item', 'mode': 'NULLABLE' },
    { 'type': 'INTEGER', 'name': 'Line_Item_Id', 'mode': 'NULLABLE' },
    { 'type': 'STRING', 'name': 'Line_Item_Type', 'mode': 'NULLABLE' },
    { 'type': 'INTEGER', 'name': 'Impressions', 'mode': 'NULLABLE' },
    { 'type': 'STRING', 'name': 'Segment1', 'mode': 'NULLABLE' },
    { 'type': 'STRING', 'name': 'Segment2', 'mode': 'NULLABLE' },
    { 'type': 'STRING', 'name': 'Segment3', 'mode': 'NULLABLE' }
  ]

  sheet_rows = sheets_read(
    'user', project.task['sheet'], 'DV3 Segments', a1_notation, retries=10)
  if not sheet_rows:
    sheet_rows = []

  print('DV360 SEGMENT SHEET TABLE WRITE')

  rows_to_table(
    auth='service',
    project_id=project.id,
    dataset_id=project.task['dataset'],
    table_id=DV360_CUSTOM_SEGMENTS_SHEET_TABLE,
    rows=sheet_rows,
    schema=schema,
    skip_rows=1,
    disposition='WRITE_TRUNCATE')

  # Run Query
  if project.verbose:
    print('RUN DV360 Custom Segments Query')
  run_query_from_file(Queries.dv360_custom_segments, DV360_CUSTOM_SEGMENTS_TABLE)

  # Move Table back to sheets
  query = 'SELECT * from `' + project.id + '.' + project.task['dataset'] + '.' + DV360_CUSTOM_SEGMENTS_TABLE + '`'
  rows = query_to_rows(
    'service', project.id, project.task['dataset'], query, legacy=False)

  # makes sure types are correct in sheet
  a1_notation = a1_notation[:1] + '2' + a1_notation[1:]
  rows = rows_to_type(rows)

  sheets_clear('user', project.task['sheet'], 'DV3 Segments', a1_notation)
  sheets_write('user', project.task['sheet'], 'DV3 Segments', a1_notation, rows)
def put_rows(config, auth, destination, rows, schema=None, variant=''):
  """Processes standard write JSON block for dynamic export of data.

  Allows us to quickly write the results of a script to a destination. For
  example, write the results of a DCM report into BigQuery.

  - Will write to multiple destinations if specified.
  - Extensible, add a handler to define a new destination ( be kind, update the documentation json ).

  Include the following JSON in a recipe, then in the run.py handler when
  encountering that block pass it to this function.

    from utils.data import put_rows

    var_json = {
      "out":{
        "bigquery":{
          "auth":"[ user or service ]",
          "dataset": [ string ],
          "table": [ string ],
          "schema": [ json - standard bigquery schema json ],
          "header": [ boolean - true if header exists in rows ],
          "disposition": [ string - same as BigQuery documentation ]
        },
        "sheets":{
          "auth":"[ user or service ]",
          "sheet":[ string - full URL, suggest using share link ],
          "tab":[ string ],
          "range":[ string - A1:A notation ],
          "append": [ boolean - if sheet range should be appended to ],
          "delete": [ boolean - if sheet range should be cleared before writing ]
        },
        "storage":{
          "auth":"[ user or service ]",
          "bucket": [ string ],
          "path": [ string ]
        },
        "file":[ string - full path to place to write file ]
      }
    }

    put_rows(config, 'user', var_json, rows)

  Args:
    config: (Configuration) Object carrying the project id and verbose flag used by the writers.
    auth: (string) The type of authentication to use, user or service.
    destination: (json) A json block resembling var_json described above.
    rows: (iterator) The list of rows to be written, if NULL no action is performed.
    schema: (json) A bigquery schema definition.
    variant: (string) Appended to destination to differentiate multiple objects.

  Returns:
    Nothing, writes rows to the destinations specified in the JSON block.
  """

  if rows is None:
    if config.verbose:
      print('PUT ROWS: Rows is None, ignoring write.')
    return

  if 'bigquery' in destination:

    if not schema:
      schema = destination['bigquery'].get('schema')

    skip_rows = 1 if destination['bigquery'].get('header') and schema else 0

    if destination['bigquery'].get('format', 'CSV') == 'JSON':
      json_to_table(
        config,
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', config.project),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        schema,
        destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

    elif destination['bigquery'].get('is_incremental_load', False) == True:
      incremental_rows_to_table(
        config,
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', config.project),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        schema,
        destination['bigquery'].get('skip_rows', skip_rows),
        destination['bigquery'].get('disposition', 'WRITE_APPEND'),
        billing_project_id=config.project)

    else:
      rows_to_table(
        config,
        destination['bigquery'].get('auth', auth),
        destination['bigquery'].get('project_id', config.project),
        destination['bigquery']['dataset'],
        destination['bigquery']['table'] + variant,
        rows,
        schema,
        destination['bigquery'].get('skip_rows', skip_rows),
        destination['bigquery'].get('disposition', 'WRITE_TRUNCATE'),
      )

  if 'sheets' in destination:
    if destination['sheets'].get('delete', False):
      sheets_clear(
        config,
        destination['sheets'].get('auth', auth),
        destination['sheets']['sheet'],
        destination['sheets']['tab'] + variant,
        destination['sheets']['range'],
      )

    sheets_write(
      config,
      destination['sheets'].get('auth', auth),
      destination['sheets']['sheet'],
      destination['sheets']['tab'] + variant,
      destination['sheets']['range'],
      rows_to_type(rows),
      destination['sheets'].get('append', False),
    )

  if 'file' in destination:
    path_out, file_ext = destination['file'].rsplit('.', 1)
    file_out = path_out + variant + '.' + file_ext
    if config.verbose:
      print('SAVING', file_out)
    makedirs_safe(parse_path(file_out))
    with open(file_out, 'w') as save_file:
      save_file.write(rows_to_csv(rows).read())

  if 'storage' in destination and destination['storage'].get(
      'bucket') and destination['storage'].get('path'):
    bucket_create(
      config,
      destination['storage'].get('auth', auth),
      config.project,
      destination['storage']['bucket']
    )

    # put the file
    file_out = destination['storage']['bucket'] + ':' + destination['storage']['path'] + variant
    if config.verbose:
      print('SAVING', file_out)
    object_put(config, auth, file_out, rows_to_csv(rows))

  if 'sftp' in destination:
    try:
      cnopts = pysftp.CnOpts()
      cnopts.hostkeys = None

      path_out, file_out = destination['sftp']['file'].rsplit('.', 1)
      file_out = path_out + variant + '.' + file_out

      sftp = pysftp.Connection(
        host=destination['sftp']['host'],
        username=destination['sftp']['username'],
        password=destination['sftp']['password'],
        port=destination['sftp']['port'],
        cnopts=cnopts)

      if '/' in file_out:
        dir_out, file_out = file_out.rsplit('/', 1)
        sftp.cwd(dir_out)

      sftp.putfo(rows_to_csv(rows), file_out)

    except Exception as e:
      print(str(e))
      traceback.print_exc()