def report_shadow(name, dateRange, schedule, advertiser, campaign):
  """Create and download the shadow advertiser DBM cost report.

  Args:
    name: label used in the report and table names.
    dateRange: CM report dateRange criteria.
    schedule: CM report schedule definition.
    advertiser: advertiser id used as a dimension filter.
    campaign: campaign id used as a dimension filter.

  Returns:
    Name of the BigQuery table the report was written to.
  """
  if project.verbose:
    print("DYNAMIC COSTS SHADOW:", name)

  # restrict the report to the shadow advertiser and campaign
  dimension_filters = [
      {
          "dimensionName": "dfa:advertiser",
          "id": advertiser,
          "kind": "dfareporting#dimensionValue",
          "matchType": "EXACT"
      },
      {
          "dimensionName": "dfa:campaign",
          "id": campaign,
          "kind": "dfareporting#dimensionValue",
          "matchType": "EXACT"
      }
  ]

  # pull DBM cost by placement
  body = {
      "kind": "dfareporting#report",
      "type": "STANDARD",
      "name": "Dynamic Costs %s - Shadow Advertiser ( StarThinker )" % name,
      "schedule": schedule,
      "criteria": {
          "dateRange": dateRange,
          "dimensionFilters": dimension_filters,
          "dimensions": [
              {"kind": "dfareporting#sortedDimension", "name": "dfa:placement"},
              {"kind": "dfareporting#sortedDimension", "name": "dfa:placementId"}
          ],
          "metricNames": ["dfa:dbmCost"]
      }
  }

  # create the report if it does not exist
  report = report_build(project.task["auth"], project.task["account"], body)

  # fetch report file if it exists ( timeout = 0 means grab most recent ready )
  filename, filedata = report_file(
      project.task["auth"],
      project.task["account"],
      report["id"]
  )

  # write report to a table ( avoid collisions as best as possible )
  table_name = "Dynamic_Costs_%s_Shadow_Advertiser" % name
  write_report(filedata, project.task["out"]["dataset"], table_name)

  return table_name
def report_main(name, dateRange, advertiser, campaign, shadow=True):
  """Create and download the main advertiser report.

  Args:
    name: label used in the report and table names.
    dateRange: CM report dateRange criteria.
    advertiser: advertiser id used as a dimension filter.
    campaign: campaign id used as a dimension filter.
    shadow: when False, DBM cost is pulled by this report instead of a
      separate shadow advertiser report.

  Returns:
    Name of the BigQuery table the report was written to.
  """
  # BUG FIX: was a Python 2 print statement ( SyntaxError under Python 3,
  # which the rest of this file targets with print() calls ).
  if project.verbose:
    print("DYNAMIC COSTS MAIN:", name)

  # base report schema
  report_schema = {
      "kind": "dfareporting#report",
      "type": "STANDARD",
      "name": "Dynamic Costs %s - Main Advertiser ( StarThinker )" % name,
      "criteria": {
          "dateRange": dateRange,
          "dimensionFilters": [{
              "kind": "dfareporting#dimensionValue",
              "dimensionName": "dfa:advertiser",
              "id": advertiser,
              "matchType": "EXACT"
          }, {
              "kind": "dfareporting#dimensionValue",
              "dimensionName": "dfa:campaign",
              "id": campaign,
              "matchType": "EXACT"
          }],
          "dimensions": [{
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:placement"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:placementId"
          }],
          "metricNames": ["dfa:impressions", "dfa:clicks"]
      }
  }

  # if not using shadow advertiser, pull DBM cost here
  if not shadow:
    report_schema["criteria"]["metricNames"].append("dfa:dbmCost")

  # create the report if it does not exist
  report = report_build(project.task["auth"], project.task["account"],
                        report_schema)

  # fetch report file if it exists ( timeout = 0 means grab most recent ready )
  filename, filedata = report_file(project.task["auth"],
                                   project.task["account"], report["id"], None,
                                   60, DCM_CHUNK_SIZE)

  # write report to a table ( avoid collisions as best as possible )
  table_name = "Dynamic_Costs_%s_Main_Advertiser" % name
  write_report(filedata, project.task["out"]["dataset"], table_name)

  return table_name
def dcm_replicate_create(account, advertisers, name, template):
  """Create and run one replicated CM report for an account/advertiser set.

  Args:
    account: CM account id the report is created under.
    advertisers: advertiser ids used as report filter values.
    name: unique name assigned to the replicated report.
    template: report body used as the starting point for filtering.
  """
  print('DCM REPLICATE CREATE', name)

  # optionally remove any prior copy of this report first
  if project.task['report'].get('delete', False):
    report_delete(project.task['auth'], account, None, name)

  # add account and advertiser filters ( returns a new dictionary )
  filters = {
      'accountId': {'values': account},
      'dfa:advertiser': {'values': advertisers}
  }
  body = report_filter(project.task['auth'], template, filters)
  body['name'] = name

  # create and run the report if it does not exist
  report = report_build(project.task['auth'], account, body)
def dcm():
  """Run the 'dcm' task: optionally delete, run, build, and download a CM report.

  All behavior is driven by the project.task dictionary; the report helpers
  (report_delete, report_run, report_create, report_build, report_file,
  report_to_rows, report_clean, report_schema, get_rows, put_rows) are
  defined elsewhere in the project.
  """
  if project.verbose: print('DCM')

  # stores existing report json
  report = None

  # check if report is to be deleted
  if project.task.get('delete', False):
    # report is identified by explicit name, body name, or id - in that order
    if project.verbose: print('DCM DELETE', project.task['report'].get('name', None) or project.task['report'].get('body', {}).get('name', None) or project.task['report'].get('report_id', None))
    report_delete(
      project.task['auth'],
      project.task['report']['account'],
      project.task['report'].get('report_id', None),
      project.task['report'].get('name', None) or project.task['report'].get('body', {}).get('name', None),
    )

  # check if report is to be run
  if project.task.get('report_run_only', False):
    if project.verbose: print('DCM REPORT RUN', project.task['report'].get('name', None) or project.task['report'].get('report_id', None))
    report_run(
      project.task['auth'],
      project.task['report']['account'],
      project.task['report'].get('report_id', None),
      project.task['report'].get('name', None),
    )

  # check if report is to be created - DEPRECATED
  if 'type' in project.task['report']:
    if project.verbose: print('DCM CREATE')
    report = report_create(
      project.task['auth'],
      project.task['report']['account'],
      project.task['report']['name'],
      project.task['report']
    )

  # check if report is to be created
  if 'body' in project.task['report']:
    if project.verbose: print('DCM BUILD', project.task['report']['body']['name'])
    # filters can be passed using special get_rows handler, allows reading values from sheets etc...
    if 'filters' in project.task['report']:
      for f, d in project.task['report']['filters'].items():
        for v in get_rows(project.task['auth'], d):
          # accounts are specified in a unique part of the report json
          # NOTE(review): `f in 'accountId'` is a SUBSTRING test, not equality -
          # e.g. f == 'account' also matches; looks like `==` was intended - confirm.
          if f in 'accountId':
            project.task['report']['body']['accountId'] = v
          # activities are specified in a unique part of the report json
          # NOTE(review): same substring quirk as above applies here.
          elif f in 'dfa:activity':
            project.task['report']['body']['reachCriteria']['activities'].setdefault('filters', []).append({
              "kind":"dfareporting#dimensionValue",
              "dimensionName": f,
              "id": v
            })
          # all other filters go in the same place
          else:
            project.task['report']['body']['criteria'].setdefault('dimensionFilters', []).append({
              "kind":"dfareporting#dimensionValue",
              "dimensionName": f,
              "id": v,
              "matchType": "EXACT"
            })
    # account can come from the body, falling back to the task account
    report = report_build(
      project.task['auth'],
      project.task['report']['body'].get('accountId') or project.task['report']['account'],
      project.task['report']['body']
    )

  # moving a report
  if 'out' in project.task:
    filename, report = report_file(
      project.task['auth'],
      project.task['report']['account'],
      project.task['report'].get('report_id', None),
      project.task['report'].get('name', None) or project.task['report'].get('body', {}).get('name', None),
      project.task['report'].get('timeout', 10),
    )
    # report is None when the file is not ready within the timeout
    if report:
      if project.verbose: print('DCM FILE', filename)
      # clean up the report
      rows = report_to_rows(report)
      rows = report_clean(rows)
      # if bigquery, remove header and determine schema
      schema = None
      if 'bigquery' in project.task['out']:
        # consumes the first row of the iterator as the header
        schema = report_schema(next(rows))
        project.task['out']['bigquery']['schema'] = schema
        project.task['out']['bigquery']['skip_rows'] = 0
      # write rows using standard out block in json ( allows customization across all scripts )
      if rows: put_rows(project.task['auth'], project.task['out'], rows)
def report_combos(name, dateRange, advertiser, campaign, dynamicProfile):
  """Create and download the dynamic combos report for one dynamic profile.

  Args:
    name: label used in the report and table names.
    dateRange: CM report dateRange criteria.
    advertiser: advertiser id used as a dimension filter.
    campaign: campaign id used as a dimension filter.
    dynamicProfile: dynamic profile id used as a dimension filter.

  Returns:
    Name of the BigQuery table the report was written to.
  """
  # BUG FIX: was a Python 2 print statement ( SyntaxError under Python 3,
  # which the rest of this file targets with print() calls ).
  if project.verbose:
    print("DYNAMIC COSTS COMBOS:", name)

  # basic report schema, with no dynamic elements
  report_schema = {
      "kind": "dfareporting#report",
      "type": "STANDARD",
      "name": "Dynamic Costs %s - Dynamic Combos ( StarThinker )" % name,
      "criteria": {
          "dateRange": dateRange,
          "dimensionFilters": [{
              "kind": "dfareporting#dimensionValue",
              "dimensionName": "dfa:dynamicProfile",
              "id": dynamicProfile,
              "matchType": "EXACT"
          }, {
              "kind": "dfareporting#dimensionValue",
              "dimensionName": "dfa:advertiser",
              "id": advertiser,
              "matchType": "EXACT"
          }, {
              "kind": "dfareporting#dimensionValue",
              "dimensionName": "dfa:campaign",
              "id": campaign,
              "matchType": "EXACT"
          }],
          "dimensions": [{
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:placement"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:placementId"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:activity"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:activityId"
          }],
          "metricNames": ["dfa:impressions", "dfa:clicks", "dfa:totalConversions"]
      }
  }

  # add in all reasonable dynamic elements
  for i in range(1, 5 + 1):  # 5 elements/feeds
    for j in range(1, 6 + 1):  # 6 fields per element
      report_schema["criteria"]["dimensions"].append({
          "kind": "dfareporting#sortedDimension",
          "name": "dfa:dynamicElement%iField%iValue" % (i, j)
      })

  # create the report if it does not exist
  report = report_build(project.task["auth"], project.task["account"],
                        report_schema)

  # fetch report file if it exists ( timeout = 0 means grab most recent ready )
  filename, filedata = report_file(project.task["auth"],
                                   project.task["account"], report["id"], None,
                                   60, DCM_CHUNK_SIZE)

  # write report to a table ( avoid collisions as best as possible )
  table_name = "Dynamic_Costs_%s_Dynamic_Combos" % name
  write_report(filedata, project.task["out"]["dataset"], table_name)

  return table_name
def floodlight_report(floodlight_id):
  """Create ( if missing ) and fetch the monitor report for one floodlight.

  Args:
    floodlight_id: the floodlight config being monitored.

  Returns:
    The report file data, or None if no file is ready yet.
  """
  account_id, subaccount_id, profile_id = parse_account(
      project.task['auth'], project.task['account'])

  name = 'Floodlight Monitor %s %s ( StarThinker )' % (account_id,
                                                       floodlight_id)

  # BUG FIX: was a Python 2 print statement ( SyntaxError under Python 3,
  # which the rest of this file targets with print() calls ).
  if project.verbose:
    print("FLOODLIGHT MONITOR REPORT: ", name)

  # create report if it does not exists
  report = report_build(
      project.task['auth'], project.task['account'], {
          'kind': 'dfareporting#report',
          'type': 'FLOODLIGHT',
          'accountId': account_id,
          'ownerProfileId': profile_id,
          'name': name,
          'fileName': name.replace('( ', '').replace(' )', '').replace(
              ' ', '_'),
          'format': 'CSV',
          'delivery': {
              'emailOwner': False
          },
          'floodlightCriteria': {
              'dateRange': {
                  'kind': 'dfareporting#dateRange',
                  'relativeDateRange': 'LAST_7_DAYS'
              },
              'dimensions': [{
                  'kind': 'dfareporting#sortedDimension',
                  'name': 'dfa:date'
              }, {
                  'kind': 'dfareporting#sortedDimension',
                  'name': 'dfa:floodlightConfigId'
              }, {
                  'kind': 'dfareporting#sortedDimension',
                  'name': 'dfa:activityGroupId'
              }, {
                  'kind': 'dfareporting#sortedDimension',
                  'name': 'dfa:activityGroup'
              }, {
                  'kind': 'dfareporting#sortedDimension',
                  'name': 'dfa:activityId'
              }, {
                  'kind': 'dfareporting#sortedDimension',
                  'name': 'dfa:activity'
              }],
              'floodlightConfigId': {
                  'dimensionName': 'dfa:floodlightConfigId',
                  'kind': 'dfareporting#dimensionValue',
                  'matchType': 'EXACT',
                  'value': floodlight_id
              },
              'metricNames': ['dfa:floodlightImpressions'],
              'reportProperties': {
                  'includeUnattributedCookieConversions': False,
                  'includeUnattributedIPConversions': False
              }
          },
          'schedule': {
              'active': True,
              'every': 1,
              'repeats': 'DAILY',
              'startDate': str(date.today()),
              'expirationDate': str((date.today() + timedelta(days=365))),
          },
      })

  # fetch report file if it exists ( timeout = 0 means grab most recent ready )
  filename, report = report_file(project.task['auth'], project.task['account'],
                                 report['id'], None, 0, DCM_CHUNK_SIZE)

  return report
def run_floodlight_reports(project):
  """Create one Floodlight report per config id, land each in BQ, then union.

  Args:
    project: StarThinker project object carrying the task configuration.

  Side effects:
    Creates CM reports, writes one BigQuery table per report, and writes the
    unioned result to CM_FLOODLIGHT_OUTPUT_TABLE.
  """
  if project.verbose:
    print('Creating Floodlight reports')

  body = {
      "kind": "dfareporting#report",
      "name": '',  # this is updated below based on Floodlight Config ID
      "format": "CSV",
      "type": "FLOODLIGHT",
      "floodlightCriteria": {
          "dateRange": {
              "kind": "dfareporting#dateRange",
              "relativeDateRange": "LAST_60_DAYS"
          },
          "floodlightConfigId": {
              "kind": "dfareporting#dimensionValue",
              "dimensionName": "dfa:floodlightConfigId",
              "value": 0,  # updated below and replaced with Floodlight Config ID
              "matchType": "EXACT"
          },
          "reportProperties": {
              "includeUnattributedIPConversions": False,
              "includeUnattributedCookieConversions": True
          },
          "dimensions": [{
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:site"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:floodlightAttributionType"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:interactionType"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:pathType"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:browserPlatform"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:platformType"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:week"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:placementId"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:floodlightConfigId"
          }],
          "metricNames": [
              "dfa:activityClickThroughConversions",
              "dfa:activityViewThroughConversions", "dfa:totalConversions",
              "dfa:totalConversionsRevenue"
          ]
      },
      "schedule": {
          "active": True,
          "repeats": "WEEKLY",
          "every": 1,
          "repeatsOnWeekDays": ["Sunday"]
      },
      "delivery": {
          "emailOwner": False
      }
  }

  reports = []

  # BUG FIX: the original used .get('floodlightConfigIds', None) and then
  # iterated unconditionally, raising TypeError when the key was absent.
  floodlightConfigs = project.task.get('floodlightConfigIds') or []

  for configId in floodlightConfigs:
    body['name'] = project.task.get('reportPrefix', '') + "_" + str(configId)
    body['floodlightCriteria']['floodlightConfigId']['value'] = configId
    report = report_build('user', project.task['account'], body)
    reports.append(report['id'])

  if project.verbose:
    print('Finished creating Floodlight reports - moving to BQ')

  queries = []

  for createdReportId in reports:
    filename, report = report_file(
        'user',
        project.task['account'],
        createdReportId,
        None,
        project.task.get('timeout', 10),
    )

    # report is None when the file is not ready within the timeout
    if report:
      if project.verbose:
        print('Floodlight config report ', filename)

      # clean up the report
      rows = report_to_rows(report)
      rows = report_clean(rows)

      # determine schema ( consumes the header row of the iterator )
      schema = report_schema(next(rows))

      out_block = {}
      out_block['bigquery'] = {}
      out_block['bigquery']['dataset'] = project.task['dataset']
      out_block['bigquery']['schema'] = schema
      out_block['bigquery']['skip_rows'] = 0
      out_block['bigquery']['table'] = 'z_Floodlight_CM_Report_' + str(
          createdReportId)

      # write rows using standard out block in json ( allows customization across all scripts )
      if rows:
        put_rows('service', out_block, rows)

      queries.append('SELECT * FROM `{0}.{1}.{2}`'.format(
          project.id, out_block['bigquery']['dataset'],
          out_block['bigquery']['table']))

  if project.verbose:
    print('Moved reports to BQ tables - starting join')

  # ROBUSTNESS FIX: only run the union when at least one report landed,
  # otherwise query_to_table would be handed an empty query string.
  if queries:
    finalQuery = ' UNION ALL '.join(queries)

    query_to_table(
        'service',
        project.id,
        project.task['dataset'],
        CM_FLOODLIGHT_OUTPUT_TABLE,
        finalQuery,
        legacy=False)

  if project.verbose:
    print('Finished with Floodlight Config reports')
def dcm():
  """Run the 'dcm' task: optionally delete, run, build, and download a CM report.

  All behavior is driven by the project.task dictionary; the report helpers
  (report_delete, report_run, report_filter, report_build, report_file,
  report_to_rows, report_clean, report_schema, put_rows) are defined
  elsewhere in the project.
  """
  if project.verbose: print('DCM')

  # stores existing report json
  report = None

  # check if report is to be deleted
  if project.task.get('delete', False):
    # report is identified by explicit name, body name, or id - in that order
    if project.verbose: print('DCM DELETE', project.task['report'].get('name', None) or project.task['report'].get('body', {}).get('name', None) or project.task['report'].get('report_id', None))
    report_delete(
      project.task['auth'],
      project.task['report']['account'],
      project.task['report'].get('report_id', None),
      project.task['report'].get('name', None) or project.task['report'].get('body', {}).get('name', None),
    )

  # check if report is to be run
  if project.task.get('report_run_only', False):
    if project.verbose: print('DCM REPORT RUN', project.task['report'].get('name', None) or project.task['report'].get('report_id', None))
    report_run(
      project.task['auth'],
      project.task['report']['account'],
      project.task['report'].get('report_id', None),
      project.task['report'].get('name', None),
    )

  # check if report is to be created
  if 'body' in project.task['report']:
    if project.verbose: print('DCM BUILD', project.task['report']['body']['name'])

    # filters are resolved and merged into the body by report_filter
    if 'filters' in project.task['report']:
      project.task['report']['body'] = report_filter(
        project.task['auth'],
        project.task['report']['body'],
        project.task['report']['filters']
      )

    # account can come from the body, falling back to the task account
    report = report_build(
      project.task['auth'],
      project.task['report']['body'].get('accountId') or project.task['report']['account'],
      project.task['report']['body']
    )

  # moving a report
  if 'out' in project.task:
    filename, report = report_file(
      project.task['auth'],
      project.task['report']['account'],
      project.task['report'].get('report_id', None),
      project.task['report'].get('name', None) or project.task['report'].get('body', {}).get('name', None),
      project.task['report'].get('timeout', 10),
    )

    # report is None when the file is not ready within the timeout
    if report:
      if project.verbose: print('DCM FILE', filename)

      # clean up the report
      rows = report_to_rows(report)
      rows = report_clean(rows)

      # if bigquery, remove header and determine schema
      schema = None
      if 'bigquery' in project.task['out']:
        # consumes the first row of the iterator as the header
        schema = report_schema(next(rows))
        project.task['out']['bigquery']['schema'] = schema
        project.task['out']['bigquery']['skip_rows'] = 0

      # write rows using standard out block in json ( allows customization across all scripts )
      if rows: put_rows(project.task['auth'], project.task['out'], rows)
def floodlight_report(floodlight_id: int) -> int:
  """ Create a report for a specific floodlight if it does not exist.

  Args:
    floodlight_id - the floodlight being monitored

  Returns:
    The id of the created report.
  """
  account_id, subaccount_id = parse_account(project.task['auth'],
                                            project.task['account'])

  name = 'Floodlight Monitor %s %s ( StarThinker )' % (account_id,
                                                       floodlight_id)

  if project.verbose:
    print('FLOODLIGHT MONITOR REPORT: ', name)

  # the daily breakdown dimensions for the floodlight activity
  dimensions = [{
      'kind': 'dfareporting#sortedDimension',
      'name': dimension
  } for dimension in ('dfa:date', 'dfa:floodlightConfigId',
                      'dfa:activityGroupId', 'dfa:activityGroup',
                      'dfa:activityId', 'dfa:activity')]

  today = date.today()

  # create report if it does not exists
  report = report_build(
      project.task['auth'], project.task['account'], {
          'kind': 'dfareporting#report',
          'type': 'FLOODLIGHT',
          'accountId': account_id,
          'name': name,
          'fileName': name.replace('( ', '').replace(' )', '').replace(
              ' ', '_'),
          'format': 'CSV',
          'delivery': {
              'emailOwner': False
          },
          'floodlightCriteria': {
              'dateRange': {
                  'kind': 'dfareporting#dateRange',
                  'relativeDateRange': 'LAST_7_DAYS'
              },
              'dimensions': dimensions,
              'floodlightConfigId': {
                  'dimensionName': 'dfa:floodlightConfigId',
                  'kind': 'dfareporting#dimensionValue',
                  'matchType': 'EXACT',
                  'value': floodlight_id
              },
              'metricNames': ['dfa:floodlightImpressions'],
              'reportProperties': {
                  'includeUnattributedCookieConversions': False,
                  'includeUnattributedIPConversions': False
              }
          },
          'schedule': {
              'active': True,
              'every': 1,
              'repeats': 'DAILY',
              'startDate': str(today),
              'expirationDate': str(today + timedelta(days=365)),
          },
      })

  return report['id']
def create_and_move_cm_browser_report(project):
  """Build the CM browser report, download it, and write it to BigQuery.

  Args:
    project: StarThinker project object carrying the task configuration.

  Side effects:
    Creates a CM report filtered to the configured advertisers and writes
    the cleaned rows to CM_BROWSER_REPORT_DIRTY_TABLE.
  """
  browser_report_body = {
      "kind": "dfareporting#report",
      "name": project.task['cm_browser_report_name'],
      "fileName": project.task['cm_browser_report_name'],
      "format": "CSV",
      "type": "STANDARD",
      "criteria": {
          "dateRange": {
              "kind": "dfareporting#dateRange",
              "relativeDateRange": "LAST_24_MONTHS"
          },
          "dimensions": [{
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:campaign"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:campaignId"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:site"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:advertiser"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:advertiserId"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:browserPlatform"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:platformType"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:month"
          }, {
              "kind": "dfareporting#sortedDimension",
              "name": "dfa:week"
          }],
          "metricNames": [
              "dfa:impressions", "dfa:clicks", "dfa:totalConversions",
              "dfa:activityViewThroughConversions",
              "dfa:activityClickThroughConversions"
          ],
          "dimensionFilters": []
      },
      "schedule": {
          "active": False,
          "repeats": "DAILY",
          "every": 1,
          "startDate": "2019-09-10",
          "expirationDate": "2029-12-09"
      },
      "delivery": {
          "emailOwner": False
      }
  }

  # Remove any duplicate entries from the advertiser ids, preserving order.
  # BUG FIX: the original comment promised de-duplication but none was done,
  # producing duplicate dimensionFilters entries.
  advertiser_ids = list(
      dict.fromkeys(project.task['advertiser_ids'].split(',')))

  # Update body with all the advertiser filters
  for advertiser in advertiser_ids:
    if advertiser:
      browser_report_body["criteria"]["dimensionFilters"].append({
          "kind": "dfareporting#dimensionValue",
          "dimensionName": "dfa:advertiser",
          "id": advertiser,
          "matchType": "EXACT"
      })

  # Create report
  report = report_build('user', project.task['account'], browser_report_body)

  # moving a report
  filename, report = report_file(
      'user',
      project.task['account'],
      None,
      project.task['cm_browser_report_name'],
      project.task.get('timeout', 60),
  )

  # report is None when the file is not ready within the timeout
  if report:
    if project.verbose:
      print('DCM FILE: ' + filename)

    # clean up the report
    rows = report_to_rows(report)
    rows = report_clean(rows)

    # remove header and determine schema ( IDIOM: next() over __next__() )
    schema = report_schema(next(rows))

    bigquery_out = {}
    bigquery_out["bigquery"] = {
        "dataset": project.task["dataset"],  # TODO: update to read from project
        "table": CM_BROWSER_REPORT_DIRTY_TABLE,
        "is_incremental_load": False,
        "datastudio": True,
        "schema": schema,
        "skip_rows": 0
    }

    # write rows using standard out block in json ( allows customization across all scripts )
    if rows:
      put_rows(project.task['auth'], bigquery_out, rows)