def FBA_Reimbursements_Report(yesterday, tomorrow):
    d = mws.get_report_request_list(
        MaxCount=1,
        ReportTypeList=['_GET_FBA_REIMBURSEMENTS_DATA_'],
        RequestedFromDate=yesterday,
        RequestedToDate=tomorrow)
    data = ""
    for i in d.GetReportRequestListResult.ReportRequestInfo:
        if i.ReportProcessingStatus == '_DONE_NO_DATA_':
            data += "No data present for the request"
            continue
        reportid = str(i.GeneratedReportId)
        report = mws.get_report(ReportId=reportid)
        data += report
    date = str(datetime.date.today())
    datelist = date.split('-')
    #address = 'marketing/amazonSellerCentral/' + datelist[0] + '/' + datelist[1] + '/'
    address = 'amazonsellercentral/landing/fba/'
    importtos3.write_to_s3(
        data, 'paragon-datalake',
        address + "mws_FBA_Reimbursements_REPORT" + date + ".csv")
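# A minimal driver sketch for the MWS pullers in this file. It only relies on the
# standard-library datetime module; plain 'YYYY-MM-DD' strings match how
# yesterday/tomorrow are used by the functions here (split on '-', or suffixed
# with a 'T14:32:16.50-07' timestamp). The one-day window itself is an
# assumption, not something defined in this file.
if __name__ == '__main__':
    import datetime

    today = datetime.date.today()
    yesterday = str(today - datetime.timedelta(days=1))  # e.g. '2020-02-09'
    tomorrow = str(today + datetime.timedelta(days=1))   # e.g. '2020-02-11'
    FBA_Reimbursements_Report(yesterday, tomorrow)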
def main(client, date, full_date):
    # Initialize the appropriate service.
    report_downloader = client.GetReportDownloader(version='v201809')

    # Create the report query.
    report_query = (
        adwords.ReportQueryBuilder().Select(
            'AdGroupName', 'AdGroupStatus', 'AllConversions', 'AverageCpc',
            'AverageCpm', 'AverageCpv', 'CampaignName', 'CampaignStatus',
            'CityCriteriaId', 'Clicks', 'ConversionRate', 'Conversions',
            'Cost', 'Ctr', 'Date', 'CustomerDescriptiveName', 'DayOfWeek',
            'Device', 'Impressions', 'InteractionRate', 'Interactions',
            'LocationType', 'MetroCriteriaId', 'VideoViews',
            'RegionCriteriaId', 'CountryCriteriaId'
        )  # these are the fields we are pulling for this report
        .From('GEO_PERFORMANCE_REPORT')
        #.Where('CampaignStatus').In('ENABLED')  # further filter the attributes
        .During(date)  # a custom range can also be specified ('DURING 20150201,20150301')
        .Build())

    x = report_downloader.DownloadReportAsStringWithAwql(
        report_query,
        'CSV',
        skip_report_header=True,
        skip_column_header=False,
        skip_report_summary=True)

    # Store the report locally as a CSV file.
    f = open("geo_report" + full_date + ".csv", "w")
    f.write(x)
    f.close()

    filename = "GoogleAds/landing/geo_report/" + "geo_report_" + full_date + ".csv"
    importtos3.write_to_s3(x, 'paragon-datalake', filename)
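# The AdWords report functions in this file all take an initialized googleads
# client, an AWQL date range, and a date string used in file names. A minimal
# sketch of invoking the function above; the googleads.yaml path and the
# single-day range are assumptions, though the 'YYYYMMDD,YYYYMMDD' range format
# matches the commented examples elsewhere in this file.
if __name__ == '__main__':
    import datetime
    from googleads import adwords

    adwords_client = adwords.AdWordsClient.LoadFromStorage('googleads.yaml')
    full_date = str(datetime.date.today() - datetime.timedelta(days=1))  # e.g. '2020-02-10'
    awql_range = full_date.replace('-', '') + ',' + full_date.replace('-', '')  # e.g. '20200210,20200210'
    main(adwords_client, awql_range, full_date)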
def Flat_File_Returns_Report_by_Return_Date(yesterday, tomorrow):
    d = mws.get_report_list(
        MaxCount=1,
        ReportTypeList=['_GET_FLAT_FILE_RETURNS_DATA_BY_RETURN_DATE_'],
        AvailableFromDate=yesterday + 'T14:32:16.50-07',
        AvailableToDate=tomorrow + 'T14:32:16.50-07')
    data = ""
    for i in d.GetReportListResult.ReportInfo:
        if i.ReportType != "FeedSummaryReport":
            reportid = str(i.ReportId)
            report = mws.get_report(ReportId=reportid)
            rep = str(report)
            data += rep
    date = str(datetime.date.today())
    datelist = date.split('-')
    address = 'marketing/amazonSellerCentral/' + datelist[0] + '/' + datelist[1] + '/'
    importtos3.write_to_s3(
        data, 'paragon-datalake',
        address + "Flat_File_Returns_Report_by_Return_Date" + date + ".csv")
def print_response(response, date, headers, startdate):
    """Parses an Analytics Reporting API V4 response and writes it to S3.

    Args:
        response: An Analytics Reporting API V4 response.
    """
    stringfordata = headers
    for report in response.get('reports', []):
        columnHeader = report.get('columnHeader', {})
        dimensionHeaders = columnHeader.get('dimensions', [])
        metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])
        for row in report.get('data', {}).get('rows', []):
            dimensions = row.get('dimensions', [])
            dateRangeValues = row.get('metrics', [])
            for header, dimension in zip(dimensionHeaders, dimensions):
                stringfordata += dimension + ' '  # dimension depends on which report is accessed
            for i, values in enumerate(dateRangeValues):
                for metricHeader, value in zip(metricHeaders, values.get('values')):
                    stringfordata += value + ' '  # append metric values to build the CSV row
            stringfordata += "\n"
    address = "googleanalytics/landing/product_details/"
    file_name = address + "product_details_page" + date + ".csv"
    importtos3.write_to_s3(stringfordata, 'paragon-datalake', file_name)
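# print_response() above expects an Analytics Reporting API v4 response plus a
# header string for the CSV. A minimal sketch of fetching such a response with
# the official google-api-python-client; the key file, view ID, dimensions, and
# metrics below are placeholders rather than values taken from this project.
from google.oauth2 import service_account
from googleapiclient.discovery import build


def fetch_ga_response(start_date, end_date):
    credentials = service_account.Credentials.from_service_account_file(
        'key.json',  # placeholder service-account key file
        scopes=['https://www.googleapis.com/auth/analytics.readonly'])
    analytics = build('analyticsreporting', 'v4', credentials=credentials)
    return analytics.reports().batchGet(body={
        'reportRequests': [{
            'viewId': '00000000',  # placeholder view ID
            'dateRanges': [{'startDate': start_date, 'endDate': end_date}],
            'dimensions': [{'name': 'ga:pagePath'}],
            'metrics': [{'expression': 'ga:pageviews'}],
        }]
    }).execute()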
def main(client, date, full_date):
    # Initialize the appropriate service.
    report_downloader = client.GetReportDownloader(version='v201809')

    # Create the report query.
    report_query = (
        adwords.ReportQueryBuilder()
        .Select('CampaignName', 'CampaignStatus', 'AdGroupName', 'AdGroupStatus',
                'AverageCpc', 'AverageCpe', 'AverageCpm', 'AverageCpv', 'Clicks',
                'Conversions', 'ConversionRate', 'Cost', 'Ctr', 'Date',
                'CustomerDescriptiveName', 'DayOfWeek', 'Device', 'Impressions',
                'Interactions', 'VideoViews', 'VideoQuartile25Rate',
                'VideoQuartile50Rate', 'VideoQuartile75Rate', 'VideoQuartile100Rate',
                'GmailForwards', 'GmailSaves', 'Status', 'Criteria')  # these are the fields we are pulling for this report
        .From('GENDER_PERFORMANCE_REPORT')
        #.Where('Status').In('ENABLED')  # further filter the attributes we are pulling using a Where clause
        .During(date)  # a custom date can be specified (see the age performance report for the syntax)
        #.During("20200210,20200210")
        .Build())

    x = report_downloader.DownloadReportAsStringWithAwql(
        report_query,
        'CSV',
        skip_report_header=True,
        skip_column_header=False,
        skip_report_summary=True,
        include_zero_impressions=True)

    # Store the report locally as a CSV file.
    f = open("gender_report" + full_date + ".csv", "w")
    f.write(x)
    f.close()

    filename = "GoogleAds/landing/gender_report/" + "gender_report_" + full_date + ".csv"
    importtos3.write_to_s3(x, 'paragon-datalake', filename)
def Merchant_Listings_All_Data(yesterday, tomorrow):
    d = mws.get_report_request_list(
        MaxCount=1,
        ReportTypeList=['_GET_MERCHANT_LISTINGS_ALL_DATA_'],
        RequestedFromDate=yesterday,
        RequestedToDate=tomorrow)
    data = ""
    for i in d.GetReportRequestListResult.ReportRequestInfo:
        if i.ReportType != "FeedSummaryReport":
            reportid = str(i.GeneratedReportId)
            report = mws.get_report(ReportId=reportid)
            data += report
    date = str(datetime.date.today())
    datelist = date.split('-')
    address = 'marketing/amazonSellerCentral/' + datelist[0] + '/' + datelist[1] + '/'
    importtos3.write_to_s3(
        data, 'paragon-datalake',
        address + 'Merchant_Listings_All_Data' + date + '.csv')
def main(client, date, full_date):
    # Initialize the appropriate service.
    report_downloader = client.GetReportDownloader(version='v201809')

    # Create the report query.
    report_query = (
        adwords.ReportQueryBuilder().Select(
            'AccountDescriptiveName', 'Amount', 'AverageCost', 'AverageCpc',
            'AverageCpe', 'AverageCpm', 'AverageCpv', 'AveragePosition',
            'BounceRate', 'CampaignName', 'AverageTimeOnSite',
            'AverageFrequency', 'AveragePageviews', 'CampaignStatus',
            'CampaignTrialType', 'ClickAssistedConversions',
            'ClickAssistedConversionsOverLastClickConversions',
            'ClickAssistedConversionValue', 'Clicks',
            'ContentBudgetLostImpressionShare', 'ContentImpressionShare',
            'ContentRankLostImpressionShare', 'Conversions', 'Cost',
            'CostPerConversion', 'Ctr', 'CustomerDescriptiveName', 'Date',
            'EndDate', 'StartDate', 'GmailForwards', 'GmailSaves',
            'GmailSecondaryClicks', 'HasRecommendedBudget',
            'ImpressionAssistedConversions',
            'ImpressionAssistedConversionsOverLastClickConversions',
            'ImpressionAssistedConversionValue', 'ImpressionReach',
            'Impressions', 'Interactions', 'InteractionRate',
            'InvalidClickRate', 'InvalidClicks', 'IsBudgetExplicitlyShared',
            'PercentNewVisitors', 'Period', 'RecommendedBudgetAmount',
            'RelativeCtr', 'SearchAbsoluteTopImpressionShare',
            'SearchBudgetLostAbsoluteTopImpressionShare',
            'SearchBudgetLostImpressionShare',
            'SearchBudgetLostTopImpressionShare', 'SearchClickShare',
            'SearchExactMatchImpressionShare', 'SearchImpressionShare',
            'SearchRankLostAbsoluteTopImpressionShare',
            'SearchRankLostImpressionShare', 'SearchRankLostTopImpressionShare',
            'SearchTopImpressionShare', 'ServingStatus', 'VideoQuartile75Rate',
            'VideoQuartile25Rate', 'VideoQuartile50Rate',
            'VideoQuartile100Rate', 'VideoViews', 'VideoViewRate'
        )  # these are the fields we are using to generate our report
        .From('CAMPAIGN_PERFORMANCE_REPORT')  # name of the report
        #.Where('Status').In('ENABLED')  # we can filter the attributes we are pulling using a Where clause
        .During(date)  # a custom range can also be specified ('DURING 20150201,20150301')
        .Build())

    x = report_downloader.DownloadReportAsStringWithAwql(
        report_query,
        'CSV',
        skip_report_header=True,
        skip_column_header=False,
        skip_report_summary=True,
        include_zero_impressions=True)

    # Store the report locally as a CSV file.
    f = open("campaign_report_" + full_date + ".csv", "w")
    f.write(x)
    f.close()

    filename = "GoogleAds/landing/campaign_report/" + "campaign_report_" + full_date + ".csv"
    importtos3.write_to_s3(x, 'paragon-datalake', filename)
def main(client, date, full_date):
    # Initialize the appropriate service.
    report_downloader = client.GetReportDownloader(version='v201809')

    # Create the report query.
    report_query = (
        adwords.ReportQueryBuilder().Select(
            'CampaignName', 'CampaignStatus', 'Engagements', 'EngagementRate',
            'AdGroupName', 'AdGroupStatus', 'CriteriaType',
            'AllConversionRate', 'AllConversionValue', 'AverageCost',
            'AveragePosition', 'Criteria', 'FinalUrls', 'Clicks',
            'Impressions', 'Conversions', 'ConversionValue', 'ConversionRate',
            'Cost', 'Interactions', 'Date', 'InteractionRate', 'VideoViews',
            'VideoViewRate', 'VideoQuartile50Rate', 'VideoQuartile100Rate',
            'VideoQuartile25Rate', 'VideoQuartile75Rate', 'SearchPredictedCtr',
            'Status', 'CreativeQualityScore', 'ActiveViewCpm', 'ActiveViewCtr',
            'ActiveViewImpressions', 'ActiveViewMeasurability',
            'CostPerConversion', 'DayOfWeek', 'GmailForwards', 'GmailSaves',
            'Device', 'Ctr', 'AverageCpe', 'AverageCpv', 'AverageCpc',
            'AverageCpm', 'AllConversions'
        )  # these are the fields we are using to generate our report
        .From('CRITERIA_PERFORMANCE_REPORT')  # name of the report
        #.Where('Status').In('ENABLED')  # we can filter the attributes we are pulling using a Where clause
        .During(date)  # a custom range can also be specified ('DURING 20150201,20150301')
        .Build())

    x = report_downloader.DownloadReportAsStringWithAwql(
        report_query,
        'CSV',
        skip_report_header=True,
        skip_column_header=False,
        skip_report_summary=True,
        include_zero_impressions=True)

    # Store the report locally as a CSV file.
    f = open("criteria_report" + full_date + ".csv", "w")
    f.write(x)
    f.close()

    filename = "GoogleAds/landing/criteria_report/" + "criteria_report_" + full_date + ".csv"
    importtos3.write_to_s3(x, 'paragon-datalake', filename)
def main(client, date, full_date):
    # Initialize the appropriate service.
    report_downloader = client.GetReportDownloader(version='v201809')

    # Create the report query.
    report_query = (
        adwords.ReportQueryBuilder().Select(
            'AverageCost', 'AverageCpc', 'AverageCpe', 'AverageCpm',
            'AverageCpv', 'BudgetName', 'BudgetReferenceCount', 'BudgetStatus',
            'Clicks', 'Conversions', 'ConversionRate', 'Cost', 'Ctr',
            'DeliveryMethod', 'EngagementRate', 'Engagements',
            'HasRecommendedBudget', 'Impressions', 'InteractionRate',
            'Interactions', 'IsBudgetExplicitlyShared', 'Period',
            'RecommendedBudgetAmount',
            'RecommendedBudgetEstimatedChangeInWeeklyClicks',
            'RecommendedBudgetEstimatedChangeInWeeklyCost',
            'RecommendedBudgetEstimatedChangeInWeeklyInteractions',
            'RecommendedBudgetEstimatedChangeInWeeklyViews', 'TotalAmount',
            'VideoViews', 'VideoViewRate'
        )  # these are the fields we want in our report
        .From('BUDGET_PERFORMANCE_REPORT')
        #.Where('Status').In('ENABLED')  # here we can filter the attributes selected above
        .During(date)  # a custom range can also be specified ('DURING 20150201,20150301')
        .Build())

    x = report_downloader.DownloadReportAsStringWithAwql(
        report_query,
        'CSV',
        skip_report_header=True,
        skip_column_header=False,
        skip_report_summary=True,
        include_zero_impressions=True)

    # Store the report locally as a CSV file.
    f = open("budget_report_" + full_date + ".csv", "w")
    f.write(x)
    f.close()

    filename = "GoogleAds/landing/budget_report/" + "budget_report_" + full_date + ".csv"
    importtos3.write_to_s3(x, 'paragon-datalake', filename)
def FBA_Returns_Report(yesterday, tomorrow):
    d = mws.get_report_request_list(
        MaxCount=1,
        ReportTypeList=['_GET_FBA_FULFILLMENT_CUSTOMER_RETURNS_DATA_'],
        RequestedFromDate=yesterday,
        RequestedToDate=tomorrow)
    data = ""
    for i in d.GetReportRequestListResult.ReportRequestInfo:
        reportid = str(i.GeneratedReportId)
        report = mws.get_report(ReportId=reportid)
        data += report
    date = str(datetime.date.today())
    datelist = date.split('-')
    #address = 'marketing/amazonSellerCentral/' + datelist[0] + '/' + datelist[1] + '/'
    address = 'amazonsellercentral/landing/fba/'
    importtos3.write_to_s3(data, 'paragon-datalake',
                           address + "mws_FBA_RETURNS_REPORT" + date + ".csv")
def Flat_File_All_Orders_Report_by_Order_Date(yesterday, tomorrow):
    d = mws.get_report_request_list(
        MaxCount=1,
        ReportTypeList=['_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_'],
        RequestedFromDate=yesterday,
        RequestedToDate=tomorrow)
    data = ""
    for i in d.GetReportRequestListResult.ReportRequestInfo:
        reportid = str(i.GeneratedReportId)
        report = mws.get_report(ReportId=reportid)
        data += report
    print(data)
    address = 'amazonSellerCentral/landing/fba/'
    importtos3.write_to_s3(data, 'paragon-datalake',
                           address + "mws_performance.csv")
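# Every loader in this file hands its payload to importtos3.write_to_s3(), which
# is not defined here. The boto3-based sketch below is only a plausible stand-in
# for the three-argument (data, bucket, key) form used above; the real helper may
# differ, and the Facebook loader further down calls it with a field list and
# two keys instead.
import boto3


def write_to_s3(data, bucket, key):
    """Upload a report string to s3://<bucket>/<key> (hypothetical sketch)."""
    s3 = boto3.client('s3')
    s3.put_object(Bucket=bucket, Key=key, Body=data.encode('utf-8'))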
def Unshipped_Orders_Report(yesterday, tomorrow):
    d = mws.get_report_request_list(
        MaxCount=2,
        ReportTypeList=['_GET_FLAT_FILE_ACTIONABLE_ORDER_DATA_'],
        RequestedFromDate=yesterday,
        RequestedToDate=tomorrow)
    data = ""
    for i in d.GetReportRequestListResult.ReportRequestInfo:
        if i.ReportType != "FeedSummaryReport":
            reportid = str(i.GeneratedReportId)
            report = mws.get_report(ReportId=reportid)
            data += report
    date = str(datetime.date.today())
    datelist = date.split('-')
    address = 'marketing/amazonSellerCentral/' + datelist[0] + '/' + datelist[1] + '/'
    importtos3.write_to_s3(data, 'paragon-datalake',
                           address + 'Unshipped_Orders_Report' + date + '.csv')
def Flat_File_Feedback_Report(yesterday, tomorrow):
    d = mws.get_report_request_list(
        MaxCount=1,
        ReportTypeList=['_GET_SELLER_FEEDBACK_DATA_'])
    # RequestedFromDate=yesterday / RequestedToDate=tomorrow are currently not passed.
    data = ""
    for i in d.GetReportRequestListResult.ReportRequestInfo:
        reportid = str(i.GeneratedReportId)
        report = mws.get_report(ReportId=reportid)
        data += report
    date = str(datetime.date.today())
    datelist = date.split('-')
    address = 'marketing/amazonSellerCentral/' + datelist[0] + '/' + datelist[1] + '/'
    importtos3.write_to_s3(data, 'paragon-datalake',
                           address + "mws_performance" + date + ".csv")
def main(client, date, full_date):
    # Initialize the appropriate service.
    report_downloader = client.GetReportDownloader(version='v201809')

    # Create the report query.
    report_query = (
        adwords.ReportQueryBuilder().Select(
            'CampaignName', 'CampaignId', 'CampaignStatus', 'AdGroupName',
            'AdGroupStatus', 'AverageCpc', 'AverageCpe', 'AverageCpm',
            'AverageCpv', 'Clicks', 'Conversions', 'ConversionRate', 'Cost',
            'Ctr', 'CustomerDescriptiveName', 'Date', 'DayOfWeek', 'Device',
            'VideoViews', 'VideoQuartile25Rate', 'VideoQuartile50Rate',
            'VideoQuartile75Rate', 'VideoQuartile100Rate', 'Criteria', 'Status'
        )  # these are the fields we are generating in our report
        .From('AGE_RANGE_PERFORMANCE_REPORT')
        #.Where('Status').In('ENABLED')  # we can specify criteria to filter the attributes selected above
        .During(date)  # 'DURING 20150201,20150301' is another way to specify the date
        .Build())

    x = report_downloader.DownloadReportAsStringWithAwql(
        report_query,
        'CSV',
        skip_report_header=True,
        skip_column_header=False,
        skip_report_summary=True,
        include_zero_impressions=True)

    # Store the report locally as a CSV file.
    f = open("age_report_" + full_date + ".csv", "w")
    f.write(x)
    f.close()

    filename = "GoogleAds/landing/age_report/" + "age_report_" + full_date + ".csv"
    importtos3.write_to_s3(x, 'paragon-datalake', filename)
def main(client, date, full_date):
    # Initialize the appropriate service.
    report_downloader = client.GetReportDownloader(version='v201809')

    # Create the report query.
    report_query = (
        adwords.ReportQueryBuilder().Select(
            'CampaignName', 'CampaignStatus', 'AdGroupName', 'AdGroupStatus',
            'AverageCpc', 'AverageCpe', 'AverageCpm', 'AverageCpv', 'Clicks',
            'Conversions', 'ConversionRate', 'Cost', 'Ctr', 'Date',
            'DayOfWeek', 'Device', 'UnexpandedFinalUrlString',
            'ExpandedFinalUrlString', 'Impressions', 'Interactions',
            'EngagementRate', 'PercentageMobileFriendlyClicks',
            'PercentageValidAcceleratedMobilePagesClicks', 'SpeedScore',
            'VideoViews'
        )  # these are the fields we are pulling to generate our report
        .From('LANDING_PAGE_REPORT')
        #.Where('Status').In('ENABLED')  # further filter the attributes being pulled using a Where clause
        .During(date)  # a custom date can be specified (see the age performance report for the syntax)
        .Build())

    x = report_downloader.DownloadReportAsStringWithAwql(
        report_query,
        'CSV',
        skip_report_header=True,
        skip_column_header=False,
        skip_report_summary=True)

    # Store the report locally as a CSV file.
    f = open("landing_pg_report" + full_date + ".csv", "w")
    f.write(x)
    f.close()

    filename = "GoogleAds/landing/landing_page_report/" + "landing_page_report_" + full_date + ".csv"
    importtos3.write_to_s3(x, 'paragon-datalake', filename)
def main(client, date, full_date):
    # Initialize the appropriate service.
    report_downloader = client.GetReportDownloader(version='v201809')

    # Create the report query.
    report_query = (
        adwords.ReportQueryBuilder()
        .Select('AccountDescriptiveName', 'AdGroupName', 'AdGroupStatus',
                'AverageCpc', 'AverageCpe', 'AverageCpm', 'AverageCpv',
                'AveragePageviews', 'AveragePosition', 'AverageTimeOnSite',
                'BounceRate', 'CampaignName', 'CampaignStatus',
                'ClickAssistedConversions',
                'ClickAssistedConversionsOverLastClickConversions',
                'ClickAssistedConversionValue', 'Clicks', 'Conversions', 'Cost',
                'CostPerConversion', 'CriteriaDestinationUrl', 'Ctr',
                'CustomerDescriptiveName', 'Date', 'DayOfWeek', 'Engagements',
                'EngagementRate', 'EstimatedAddClicksAtFirstPositionCpc',
                'EstimatedAddCostAtFirstPositionCpc',
                'HistoricalCreativeQualityScore', 'HistoricalQualityScore',
                'HistoricalSearchPredictedCtr', 'ImpressionAssistedConversions',
                'ImpressionAssistedConversionsOverLastClickConversions',
                'ImpressionAssistedConversionValue', 'Impressions',
                'InteractionRate', 'Interactions', 'KeywordMatchType',
                'PercentNewVisitors', 'QualityScore',
                'SearchAbsoluteTopImpressionShare',
                'SearchBudgetLostAbsoluteTopImpressionShare',
                'SearchBudgetLostTopImpressionShare', 'SearchImpressionShare',
                'SearchPredictedCtr', 'SearchRankLostAbsoluteTopImpressionShare',
                'SearchRankLostImpressionShare', 'SearchRankLostTopImpressionShare',
                'Status', 'SystemServingStatus', 'TopImpressionPercentage',
                'TopOfPageCpc', 'VideoQuartile100Rate', 'VideoQuartile50Rate',
                'VideoQuartile75Rate', 'VideoQuartile25Rate', 'VideoViewRate',
                'VideoViews')  # these are the fields we are using to generate our report
        .From('KEYWORDS_PERFORMANCE_REPORT')  # name of the report
        #.Where('Status').In('ENABLED')  # we can filter the attributes we are pulling using a Where clause
        .During(date)  # a custom range can also be specified ('DURING 20150201,20150301')
        .Build())

    x = report_downloader.DownloadReportAsStringWithAwql(
        report_query,
        'CSV',
        skip_report_header=True,
        skip_column_header=False,
        skip_report_summary=True,
        include_zero_impressions=True)

    # Store the report locally as a CSV file.
    f = open("keyword_report" + full_date + ".csv", "w")
    f.write(x)
    f.close()

    filename = "GoogleAds/landing/keyword_report/" + "keyword_report_" + full_date + ".csv"
    importtos3.write_to_s3(x, 'paragon-datalake', filename)
def Flat_File_Orders_By_Order_Date_Report(yesterday, tomorrow):
    d = mws.get_report_list(
        MaxCount=1,
        ReportTypeList=['_GET_FLAT_FILE_ALL_ORDERS_DATA_BY_ORDER_DATE_'],
        AvailableFromDate=yesterday + 'T14:32:16.50-07',
        AvailableToDate=tomorrow + 'T14:32:16.50-07')
    data = ""
    for i in d.GetReportListResult.ReportInfo:
        if i.ReportType != "FeedSummaryReport":
            reportid = str(i.ReportId)
            report = mws.get_report(ReportId=reportid)
            data += report
    date = str(datetime.date.today())
    datelist = date.split('-')
    address = 'marketing/amazonSellerCentral/' + datelist[0] + '/' + datelist[1] + '/'
    importtos3.write_to_s3(
        data, 'paragon-datalake',
        address + 'Flat_File_Orders_By_Order_Date_Report' + date + '.csv')
def FBA_Amazon_Fulfilled_Shipments_Report(yesterday, tomorrow):
    d = mws.get_report_request_list(
        MaxCount=1,
        ReportTypeList=['_GET_AMAZON_FULFILLED_SHIPMENTS_DATA_'],
        RequestedFromDate=yesterday,
        RequestedToDate=tomorrow)
    data = ""
    for i in d.GetReportRequestListResult.ReportRequestInfo:
        if i.ReportProcessingStatus == '_DONE_NO_DATA_':
            data += "No data present for the request\n"
            continue
        reportid = str(i.GeneratedReportId)
        report = mws.get_report(ReportId=reportid)
        data += report
    date = str(datetime.date.today())
    datelist = date.split('-')
    #address = 'marketing/amazonSellerCentral/' + datelist[0] + '/' + datelist[1] + '/'
    address = 'amazonsellercentral/landing/fba/'
    importtos3.write_to_s3(
        data, 'paragon-datalake',
        address + 'FBA_Amazon_Fulfilled_Shipments_Report' + date + '.csv')
def device():
    today = datetime.date.today()
    # Column order for the flattened CSV rows written to S3.
    fields1 = [
        'account_name', 'campaign_name', 'campaign_id', 'reach',
        'cost_per_action_type:post_reaction', 'action:post_reaction',
        'unique_actions:post_reaction', 'inline_link_clicks',
        'action:post_engagement', 'unique_outbound_clicks:outbound_click',
        'cost_per_action_type:post_engagement', 'ctr', 'cost_per_unique_click',
        'action:landing_page_view', 'unique_actions:landing_page_view',
        'action:offsite_conversion.custom.281094269070676',
        'cost_per_action_type:page_engagement',
        'cost_per_unique_action_type:page_engagement', 'device_platform',
        'action:offsite_conversion.custom.280669215754025',
        'cost_per_unique_action_type:link_click', 'website_ctr:link_click',
        'cpc', 'unique_actions:link_click', 'unique_actions:page_engagement',
        'date_stop', 'unique_inline_link_clicks',
        'cost_per_action_type:link_click', 'inline_post_engagement',
        'unique_inline_link_click_ctr',
        'cost_per_unique_action_type:landing_page_view',
        'unique_link_clicks_ctr', 'action:page_engagement',
        'cost_per_inline_post_engagement', 'action:link_click',
        'cost_per_outbound_click:outbound_click', 'unique_ctr',
        'cost_per_action_type:offsite_conversion.custom.280669215754025',
        'impressions', 'unique_actions:post_engagement',
        'cost_per_unique_action_type:post_engagement', 'inline_link_click_ctr',
        'cost_per_action_type:landing_page_view',
        'cost_per_action_type:offsite_conversion.custom.281094269070676',
        'clicks', 'cost_per_unique_inline_link_click',
        'outbound_clicks:outbound_click', 'date_start',
        'instant_experience_clicks_to_open', 'unique_clicks',
        'cost_per_unique_outbound_click:outbound_click', 'frequency',
        'instant_experience_outbound_clicks',
        'outbound_clicks_ctr:outbound_click',
        'unique_outbound_clicks_ctr:outbound_click', 'spend', 'cpp',
        'instant_experience_clicks_to_start', 'cpm', 'objective',
        'cost_per_inline_link_click'
    ]
    # Pull one single-day report for each of the last seven days.
    for x in range(1, 8):
        date1 = str(today - datetime.timedelta(days=x))
        arr = date1.split("-")
        year = arr[0]
        month = arr[1]
        params = {
            'time_range': {'since': date1, 'until': date1},
            'breakdowns': ['device_platform']
        }
        # Insight fields requested from the Marketing API.
        fields = [
            'account_name', 'campaign_name', 'campaign_id', 'reach',
            'frequency', 'impressions', 'clicks', 'cpm', 'ctr', 'spend',
            'actions', 'canvas_avg_view_percent', 'canvas_avg_view_time',
            'conversion_rate_ranking', 'conversion_values', 'conversions',
            'cost_per_action_type', 'cost_per_conversion',
            'cost_per_estimated_ad_recallers', 'cost_per_inline_link_click',
            'cost_per_inline_post_engagement', 'cost_per_outbound_click',
            'cost_per_thruplay', 'cost_per_unique_action_type',
            'cost_per_unique_click', 'cost_per_unique_inline_link_click',
            'cost_per_unique_outbound_click', 'cpc', 'cpp',
            'engagement_rate_ranking', 'estimated_ad_recall_rate',
            'estimated_ad_recallers', 'full_view_impressions',
            'full_view_reach', 'inline_link_click_ctr', 'inline_link_clicks',
            'inline_post_engagement', 'instant_experience_clicks_to_open',
            'instant_experience_clicks_to_start',
            'instant_experience_outbound_clicks', 'mobile_app_purchase_roas',
            'objective', 'outbound_clicks', 'outbound_clicks_ctr',
            'place_page_name', 'quality_ranking', 'social_spend',
            'unique_actions', 'unique_clicks', 'unique_ctr',
            'unique_inline_link_click_ctr', 'unique_inline_link_clicks',
            'unique_link_clicks_ctr', 'unique_outbound_clicks',
            'unique_outbound_clicks_ctr', 'video_30_sec_watched_actions',
            'video_avg_time_watched_actions', 'video_p100_watched_actions',
            'video_p25_watched_actions', 'video_p50_watched_actions',
            'video_p75_watched_actions', 'video_p95_watched_actions',
            'video_play_actions', 'video_play_curve_actions',
            'video_thruplay_watched_actions', 'website_ctr',
        ]
        f = open("account_id", "r")  # importing the account id from an external file
        acc = f.read()
        my_account = AdAccount(acc)
        campaignids = my_account.get_campaigns()
        list_d = []
        # Metrics that come back as lists of {action_type, value} pairs and need
        # to be flattened into "metric:action_type" columns.
        list_metrics = [
            "actions", "cost_per_action_type", "cost_per_outbound_click",
            "cost_per_unique_action_type", "cost_per_unique_outbound_click",
            "outbound_clicks", "outbound_clicks_ctr", "unique_actions",
            "unique_outbound_clicks", "unique_outbound_clicks_ctr",
            "website_ctr"
        ]
        for i in range(len(campaignids)):
            try:
                c_id = campaignids[i]["id"]
                campaign = Campaign(c_id)
                camp_insights = campaign.get_insights(fields=fields, params=params)
                for item in camp_insights:
                    # Converting the insight object to a dictionary.
                    dic_camp = dict(item)
                    # Flattening of data; rows missing any of the list metrics are skipped.
                    skip_row = False
                    for metric in list_metrics:
                        try:
                            for each_action in dic_camp[metric]:
                                dic_camp[metric + ":" + each_action['action_type']] = each_action['value']
                            del dic_camp[metric]
                        except KeyError:
                            skip_row = True
                            break
                    if skip_row:
                        continue
                    list_d.append(dic_camp)
            except IndexError:
                continue
        if list_d:
            filename1 = "FacebookAds/landing_device/" + "device_" + date1 + ".csv"
            filename2 = ("FacebookAds/processing_device/csv/" + year + "/" +
                         month + "/" + "publisher_device_" + date1 + ".csv")
            importtos3.write_to_s3(list_d, fields1, 'paragon-datalake',
                                   filename1, filename2)
    return list_d
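# device() above assumes the Facebook Marketing API SDK is already initialized
# and that AdAccount/Campaign are imported. A minimal sketch of that setup with
# the facebook_business package; how the access token is obtained is an
# assumption, only the "account_id" file is taken from the code above.
from facebook_business.api import FacebookAdsApi
from facebook_business.adobjects.adaccount import AdAccount
from facebook_business.adobjects.campaign import Campaign


def init_facebook_api(access_token):
    # A bare access token is enough for read-only insights calls;
    # an app id/secret pair could also be passed here.
    FacebookAdsApi.init(access_token=access_token)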