def main(client, image_filename):
  # Initialize appropriate service.
  media_service = client.GetMediaService(version='v201309')

  image_data = Utils.ReadFile(image_filename)
  image_data = base64.encodestring(image_data)

  # Construct media and upload image.
  media = [{
      'xsi_type': 'Image',
      'data': image_data,
      'type': 'IMAGE'
  }]
  media = media_service.Upload(media)[0]

  # Display results.
  if media:
    dimensions = Utils.GetDictFromMap(media['dimensions'])
    print ('Image with id \'%s\', dimensions \'%sx%s\', and MimeType \'%s\' was'
           ' uploaded.' % (media['mediaId'], dimensions['FULL']['height'],
                           dimensions['FULL']['width'], media['mimeType']))
  else:
    print 'No images were uploaded.'

  print
  print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                             client.GetOperations()))
def main(client):
    """Pages through and prints every image and video in the account.

    Args:
        client: an initialized AdWordsClient instance.

    Consistency fix: the original mixed Python 2 `print` statements with
    `print(...)` calls in the same function; all prints are now parenthesized
    single-argument calls, which produce identical output under Python 2 and
    are also valid Python 3.
    """
    # Initialize appropriate service.
    media_service = client.GetMediaService(version='v201406')

    # Construct selector and get all images.
    offset = 0
    selector = {
        'fields': ['MediaId', 'Type', 'Width', 'Height', 'MimeType'],
        'predicates': [{
            'field': 'Type',
            'operator': 'IN',
            'values': ['IMAGE', 'VIDEO']
        }],
        'paging': {
            'startIndex': str(offset),
            'numberResults': str(PAGE_SIZE)
        }
    }
    more_pages = True
    while more_pages:
        page = media_service.Get(selector)[0]

        # Display results.
        if 'entries' in page:
            for media in page['entries']:
                # Only images carry a 'dimensions' map; videos do not.
                if media.get('dimensions'):
                    dimensions = Utils.GetDictFromMap(media['dimensions'])
                    print(
                        'Image with id \'%s\', dimensions \'%sx%s\', and MimeType'
                        ' \'%s\' was found.' %
                        (media['mediaId'], dimensions['FULL']['height'],
                         dimensions['FULL']['width'], media['mimeType']))
                else:  # Handle video
                    print('Video with id \'%s\' was found.' % media['mediaId'])
        else:
            print('No images/videos were found.')
        # Advance paging; stop once we have walked past totalNumEntries.
        offset += PAGE_SIZE
        selector['paging']['startIndex'] = str(offset)
        more_pages = offset < int(page['totalNumEntries'])

    print('')
    print('Usage: %s units, %s operations' %
          (client.GetUnits(), client.GetOperations()))
# --- Example 3 ---
# BUG FIX: `os` was used below (os.path.join) without being imported,
# which raised a NameError at runtime.
import os

# Import appropriate classes from the client library.
from adspygoogle.adwords.AdWordsClient import AdWordsClient
from adspygoogle.common import Utils

# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..'))

# Initialize appropriate service.
media_service = client.GetMediaService('https://adwords-sandbox.google.com',
                                       'v201003')

# Construct selector and get all images.
selector = {'mediaType': 'IMAGE'}
images = media_service.Get(selector)[0]

# Display results.
if 'media' in images:
    for image in images['media']:
        dimensions = Utils.GetDictFromMap(image['dimensions'])
        print(
            'Image with id \'%s\', dimensions \'%sx%s\', and MIME type \'%s\' '
            'was found.' % (image['mediaId'], dimensions['FULL']['height'],
                            dimensions['FULL']['width'], image['mimeType']))
else:
    print('No images were found.')

print('')
print('Usage: %s units, %s operations' %
      (client.GetUnits(), client.GetOperations()))
# --- Example 4 ---
# Initialize appropriate service.
media_service = client.GetMediaService(
    'https://adwords-sandbox.google.com', 'v201101')

image_data = Utils.ReadFile('INSERT_IMAGE_PATH_HERE')
if client.soap_lib == SOAPPY:
  image_data = base64.encodestring(image_data)

# Construct media and upload image.
media = [{
    # TODO: SOAPpy needs an xsi_type here, but SOAPpy has other problems
    'data': image_data,
    'type': 'IMAGE'
}]
media = media_service.Upload(media)[0]

# Display results.
if media:
  dimensions = Utils.GetDictFromMap(media['dimensions'])
  # WARNING: SOAPpy turns the dimensions field into a list, so this won't work
  print ('Image with id \'%s\', dimensions \'%sx%s\', and MIME type \'%s\' was '
         'uploaded.' % (media['mediaId'], dimensions['FULL']['height'],
                        dimensions['FULL']['width'], media['mimeType']))
else:
  print 'No images were uploaded.'

print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
                                           client.GetOperations()))
def get_keyword_info(keyword, mode='BROAD'):
	selector = {
		'searchParameters': [{
			'type': 'RelatedToKeywordSearchParameter',
			'keywords': [{
				'text': keyword,
				'matchType': mode
			}]
		},{
		   'type': 'KeywordMatchTypeSearchParameter',
		   'keywordMatchTypes': [mode]
		},{
		   'type': 'LanguageTargetSearchParameter',
		   'languageTargets': [{'languageCode':'it'}]
		},{
		   'type': 'CountryTargetSearchParameter',
		   'countryTargets': [{'countryCode':'IT'}]
		}],
		'ideaType': 'KEYWORD',
		'requestType': 'STATS',
		'requestedAttributeTypes': ['GLOBAL_MONTHLY_SEARCHES', 'AVERAGE_TARGETED_MONTHLY_SEARCHES'],
		'paging': {
			'startIndex': '0',
			'numberResults': '1000'
		}
	}
	
	selector_estimator_service = {
		'campaignEstimateRequests': [{
			'adGroupEstimateRequests': [{
				'keywordEstimateRequests': [
					{
						'keyword': {
							'xsi_type': 'Keyword',
							'matchType': mode,
							'text': keyword
						}
					}
				],
				'maxCpc': {
					'xsi_type': 'Money',
					'microAmount': '1000000'
				}
			}],
			'targets': [
				{
					'xsi_type': 'CountryTarget',
					'countryCode': 'IT'
				},
				{
					'xsi_type': 'LanguageTarget',
					'languageCode': 'it'
				}
			]
		}]
	}
	
	estimates = None
	ret = None	
	
	ret = targeting_idea_service.Get(selector)[0]
	#do not get cpc for now
	#estimates = traffic_estimator_service.Get(selector_estimator_service)[0]

	global_searches = None
	regional_searches = None
	if ret is not None and 'entries' in ret and ret['entries']:
	  for key in ret['entries']:
			data = Utils.GetDictFromMap(key['data'])
			if 'value' in data['GLOBAL_MONTHLY_SEARCHES']:
				global_searches = int(data['GLOBAL_MONTHLY_SEARCHES']['value'])
			if 'value' in data['AVERAGE_TARGETED_MONTHLY_SEARCHES']:
				regional_searches = int(data['AVERAGE_TARGETED_MONTHLY_SEARCHES']['value'])
	
	if estimates is not None:
	  ad_group_estimate = estimates['campaignEstimates'][0]['adGroupEstimates'][0]
	  keyword_estimates = ad_group_estimate['keywordEstimates']
	  for index in xrange(len(keyword_estimates)):
		estimate = keyword_estimates[index]

		# Find the mean of the min and max values.
		mean_avg_cpc = (long(estimate['max']['averageCpc']['microAmount']) +
						long(estimate['max']['averageCpc']['microAmount'])) / 2
		mean_avg_pos = (float(estimate['min']['averagePosition']) +
						float(estimate['max']['averagePosition'])) / 2
		mean_clicks = (float(estimate['min']['clicksPerDay']) +
					   float(estimate['max']['clicksPerDay'])) / 2
		mean_total_cost = (long(estimate['min']['totalCost']['microAmount']) +
						   long(estimate['max']['totalCost']['microAmount'])) / 2

		print ('Results for the keyword with text \'%s\' and match type \'%s\':'
			   % (keyword, keyword))
		print '  Estimated average CPC: %s' % mean_avg_cpc
		print '  Estimated ad position: %s' % mean_avg_pos
		print '  Estimated daily clicks: %s' % mean_clicks
		print '  Estimated daily cost: %s' % mean_total_cost
	  
	return dict(global_searches = global_searches, regional_searches = regional_searches)