Example #1
def report_list(auth, account):
    """ Lists all the DCM report configurations for an account given the current credentials.

  Bulletproofing: https://developers.google.com/doubleclick-advertisers/v3.2/reports/list

  Args:
    * auth: (string) Either user or service.
    * account: (string) [account:advertiser@profile] token.

  Returns:
    * Iterator of JSONs.

  """

    account_id, advertiser_id, profile_id = parse_account(auth, account)
    service = get_service('dfareporting', API_VERSION, auth, uri_file=API_URI)
    next_page = None
    while next_page != '':
        if INTERNAL_MODE:
            response = API_Retry(service.reports().list(accountId=account_id,
                                                        profileId=profile_id,
                                                        pageToken=next_page))
        else:
            response = API_Retry(service.reports().list(profileId=profile_id,
                                                        pageToken=next_page))
        next_page = response['nextPageToken']
        for report in response['items']:
            yield report
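A minimal usage sketch (not part of the source): it assumes report_list and the StarThinker helpers it relies on (get_service, API_Retry, parse_account, project) are in scope with credentials configured; the account token below is a hypothetical placeholder.

# Hypothetical call: iterate every DCM report visible to these credentials.
for report in report_list('user', '12345:67890@111'):
    print(report['id'], report['name'])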
Example #2
def report_get(auth, report_id=None, name=None):
    """ Returns the DBM JSON definition of a report based on name or ID.
 
  Bulletproofing: https://developers.google.com/bid-manager/v1/queries/getquery

  Args:
    * auth: (string) Either user or service.
    * report_id: (int) ID of DBM report to fetch (either this or name).
    * name: (string) Name of report to fetch (either this or report_id).

  Returns:
    * JSON definition of report.

  """

    service = get_service('doubleclickbidmanager', API_VERSION, auth)
    if name:
        job = service.queries().listqueries()
        result = API_Retry(job)
        return ([
            query for query in result.get('queries', [])
            if query['metadata']['title'] == name
        ] or [None])[0]
    else:
        job = service.queries().getquery(queryId=report_id)
        return API_Retry(job)
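A minimal usage sketch, assuming the DBM version of report_get above is importable and credentials are configured; the report title and query ID are hypothetical.

# Hypothetical lookups: by title, or directly by queryId when it is known.
report = report_get('user', name='My DBM Report')
if report is None:
    report = report_get('user', report_id=123456789)
if report:
    print(report['queryId'], report['metadata']['title'])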
Example #3
def report_delete(auth, account, report_id=None, name=None):
    """ Deletes a DCM report based on name or ID.

  Bulletproofing: https://developers.google.com/doubleclick-advertisers/v3.2/reports/delete

  Args:
    * auth: (string) Either user or service.
    * account: (string) [account:advertiser@profile] token.
    * report_id: (int) ID of DCM report to fetch (either this or name).
    * name: (string) Name of report to fetch (either this or report_id).

  Returns:
    * None

  """

    account_id, advertiser_ids, profile_id = parse_account(auth, account)
    report = report_get(auth, account, report_id, name)
    if report:
        service = get_service('dfareporting',
                              API_VERSION,
                              auth,
                              uri_file=API_URI)
        if INTERNAL_MODE:
            API_Retry(service.reports().delete(accountId=account_id,
                                               profileId=profile_id,
                                               reportId=report['id']))
        else:
            API_Retry(service.reports().delete(profileId=profile_id,
                                               reportId=report['id']))
    else:
        if project.verbose: print('DCM DELETE: No Report')
Example #4
def report_files(auth, account, report_id):
    """ Lists all the files available for a given DCM report configuration.

  Bulletproofing: https://developers.google.com/doubleclick-advertisers/v3.2/files/list

  Args:
    * auth: (string) Either user or service.
    * account: (string) [account:advertiser@profile] token.
    * report_id: (int) DCM report identifier.

  Returns:
    * Iterator of JSONs.

  """

    account_id, advertiser_id, profile_id = parse_account(auth, account)
    service = get_service('dfareporting', API_VERSION, auth)
    next_page = None
    while next_page != '':
        if INTERNAL_MODE:
            response = API_Retry(service.reports().files().list(
                accountId=account_id,
                profileId=profile_id,
                reportId=report_id,
                pageToken=next_page))
        else:
            response = API_Retry(service.reports().files().list(
                profileId=profile_id, reportId=report_id, pageToken=next_page))
        next_page = response['nextPageToken']
        for report in response['items']:
            yield report
Example #5
def sheets_write(auth,
                 sheet_url_or_name,
                 sheet_tab,
                 sheet_range,
                 data,
                 append=False,
                 valueInputOption='RAW'):
    if project.verbose:
        print('SHEETS WRITE', sheet_url_or_name, sheet_tab, sheet_range)
    service = get_service('sheets', 'v4', auth)
    sheet_id = sheets_id(auth, sheet_url_or_name)
    range = sheets_tab_range(sheet_tab, sheet_range)
    body = {"values": list(data)}

    if append:
        API_Retry(service.spreadsheets().values().append(
            spreadsheetId=sheet_id,
            range=range,
            body=body,
            valueInputOption=valueInputOption,
            insertDataOption='OVERWRITE'))
    else:
        API_Retry(service.spreadsheets().values().update(
            spreadsheetId=sheet_id,
            range=range,
            body=body,
            valueInputOption=valueInputOption))
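A minimal usage sketch for sheets_write, assuming credentials and the sheets_* helpers above are available; the sheet name, tab, and rows are hypothetical.

# Hypothetical writes: overwrite a range, then append an extra row below the existing data.
rows = [['Name', 'Count'], ['alpha', 1], ['beta', 2]]
sheets_write('user', 'My Tracking Sheet', 'Data', 'A1:B', rows)
sheets_write('user', 'My Tracking Sheet', 'Data', 'A1:B', [['gamma', 3]], append=True)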
Example #6
def _email_find(service, email_from, email_to, date_min=None, date_max=None):
  query = 'from:%s AND to:%s' % (email_from, email_to)
  if date_min: query += ' AND after:%s' % date_to_str(date_min)
  if date_max: query += ' AND before:%s' % date_to_str(date_max + timedelta(days=1)) # make it inclusive
  if project.verbose: print('EMAIL SEARCH:', query)
  results = API_Retry(service.users().messages().list(userId='me', q=query))
  messages = results.get('messages', [])
  if project.verbose: print('EMAILS FOUND:', len(messages))
  return messages
Example #7
def report_build(auth, body):
    """ Creates a DBM report given a JSON definition.

  Bulletproofing: https://developers.google.com/bid-manager/v1/queries/createquery

  The report will be automatically run the first time.

  The body JSON provided will have the following fields added if not present:
    * schedule - set to run daily and expire in one year.
  
  Args:
    * auth: (string) Either user or service.
    * body: (json) As defined in: https://developers.google.com/doubleclick-advertisers/v3.2/reports#resource

  Returns:
    * JSON definition of report created or existing.

  """

    report = report_get(auth, name=body['metadata']['title'])

    if not report:
        service = get_service('doubleclickbidmanager', API_VERSION, auth)

        # add default daily schedule if it does not exist ( convenience )
        if "schedule" not in body:
            body['schedule'] = {
                "endTimeMs": long((time.time() + (365 * 24 * 60 * 60)) *
                                  1000),  # 1 year in future
                "frequency": "DAILY",
                "nextRunMinuteOfDay": 4 * 60,
                "nextRunTimezoneCode": body['timezoneCode']
            }

        #pprint.PrettyPrinter().pprint(body)

        # build report
        job = service.queries().createquery(body=body)
        report = API_Retry(job)

        # run report first time
        body = {
            "dataRange": report['metadata']['dataRange'],
            "timezoneCode": report['schedule']['nextRunTimezoneCode']
        }

        run = service.queries().runquery(queryId=report['queryId'], body=body)
        API_Retry(run)

    else:
        if project.verbose:
            print('DBM Report Exists:', body['metadata']['title'])

    return report
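A minimal usage sketch for report_build, assuming the DBM helpers above are in scope; the body below is an illustrative query following the DBM v1 queries resource, not a definitive template, and report_build fills in the schedule when it is omitted.

# Hypothetical DBM query body; the title, filters, metrics, and timezone are placeholders.
body = {
    'kind': 'doubleclickbidmanager#query',
    'metadata': {
        'title': 'Example Spend Report',
        'dataRange': 'LAST_7_DAYS',
        'format': 'CSV'
    },
    'params': {
        'type': 'TYPE_GENERAL',
        'groupBys': ['FILTER_ADVERTISER'],
        'metrics': ['METRIC_IMPRESSIONS']
    },
    'timezoneCode': 'America/New_York'
}
report = report_build('user', body)
print(report['queryId'])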
Example #8
def lineitem_read(auth, advertisers=[], insertion_orders=[], lineitems=[]):
  """ Reads line item configurations from DBM.
  
  Bulletproofing: https://developers.google.com/bid-manager/v1/lineitems/downloadlineitems 

  Args:
    * auth: (string) Either user or service.
    * advertisers: (list) List of advertiser ids (exclusive with insertion_orders and lineitems).
    * insertion_orders: (list) List of insertion_order ids (exclusive with advertisers and lineitems).
    * lineitems: (list) List of lineitem ids (exclusive with insertion_orders and advertisers).
  
  Returns:
    * Iterator of lists: https://developers.google.com/bid-manager/guides/entity-write/format

  """

  service = get_service('doubleclickbidmanager', API_VERSION, auth)

  body = {
    'format':'CSV',
    'fileSpec':'EWF'
  }

  if advertisers: 
    body['filterType'] = 'ADVERTISER_ID'
    body['filterIds'] = list(advertisers) # in case its a generator

  elif insertion_orders: 
    body['filterType'] = 'INSERTION_ORDER_ID'
    body['filterIds'] = list(insertion_orders) # in case its a generator

  elif lineitems: 
    body['filterType'] = 'LINE_ITEM_ID'
    body['filterIds'] = list(lineitems) # in case its a generator

  #print(body)

  result = API_Retry(service.lineitems().downloadlineitems(body=body))

  for count, row in enumerate(csv_to_rows(result.get('lineItems', ''))):
    if count == 0: continue # skip header
    row[0] = int(row[0] or 0) # LineItem ID
    row[2] = int(row[2] or 0) # Partner ID	
    row[11] = float(row[11] or 0) # IO Budget Amount
    row[18] = float(row[18] or 0) # LineItem Budget Amount
    row[21] = float(row[21] or 0) # LineItem Pacing Amount
    row[23] = int(row[23] or 0) # LineItem Frequency Exposures
    row[25] = int(row[25] or 0) # LineItem Frequency Amount
    row[26] = float(row[26] or 0) # Bid Price (CPM)
    row[28] = float(row[28] or 0) # Partner Revenue Amount
    yield row
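A minimal usage sketch for lineitem_read, assuming credentials are configured; the advertiser IDs are placeholders, and the column positions follow the comments in the function above.

# Hypothetical read: stream line item rows for two advertisers (IDs are placeholders).
for row in lineitem_read('user', advertisers=[1234567, 7654321]):
    line_item_id, budget = row[0], row[18]  # LineItem ID and LineItem Budget Amount per the comments above
    print(line_item_id, budget)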
Example #9
def query_to_rows(auth,
                  project_id,
                  dataset_id,
                  query,
                  row_max=None,
                  legacy=True):
    service = get_service('bigquery', 'v2', auth)

    # Create the query
    body = {
        "kind": "bigquery#queryRequest",
        "query": query,
        "timeoutMs": 10000,
        "dryRun": False,
        "useQueryCache": True,
        "useLegacySql": legacy
    }

    if row_max: body['maxResults'] = row_max

    if dataset_id:
        body['defaultDataset'] = {
            "projectId": project_id,
            "datasetId": dataset_id
        }

    response = API_Retry(service.jobs().query(projectId=project_id, body=body))

    while not response['jobComplete']:
        sleep(5)
        response = API_Retry(service.jobs().getQueryResults(
            projectId=project_id, jobId=response['jobReference']['jobId']))

    # Fetch all results
    row_count = 0
    while 'rows' in response:
        converters = _build_converter_array(response.get('schema', None), None,
                                            len(response['rows'][0].get('f')))
        for row in response['rows']:
            yield [
                converters[i](r['v']) for i, r in enumerate(row['f'])
            ]  # may break if we attempt nested reads
            row_count += 1

        if 'pageToken' in response:
            response = API_Retry(service.jobs().getQueryResults(
                projectId=project_id,
                jobId=response['jobReference']['jobId'],
                pageToken=response['pageToken']))
        elif row_count < response['totalRows']:
            response = API_Retry(service.jobs().getQueryResults(
                projectId=project_id,
                jobId=response['jobReference']['jobId'],
                startIndex=row_count))
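A minimal usage sketch for query_to_rows, assuming BigQuery access is configured; the project, dataset, and query are hypothetical.

# Hypothetical standard SQL query streamed row by row (legacy=False to match the backtick table name).
sql = 'SELECT name, total FROM `my_project.my_dataset.my_table` ORDER BY total DESC'
for row in query_to_rows('service', 'my_project', 'my_dataset', sql, row_max=100, legacy=False):
    print(row)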
Example #10
def report_get(auth, account, report_id=None, name=None):
    """ Returns the DCM JSON definition of a report based on name or ID.
 
  Bulletproofing: https://developers.google.com/doubleclick-advertisers/v3.2/reports/get

  Args:
    * auth: (string) Either user or service.
    * account: (string) [account:advertiser@profile] token.
    * report_id: (int) ID of DCM report to fetch (either this or name).
    * name: (string) Name of report to fetch (either this or report_id).

  Returns:
    * JSON definition of report.

  """

    account_id, advertiser_ids, profile_id = parse_account(auth, account)
    service = get_service('dfareporting', API_VERSION, auth, uri_file=API_URI)
    report = None

    if name:
        next_page = None
        while next_page != '' and report is None:
            if INTERNAL_MODE:
                response = API_Retry(service.reports().list(
                    accountId=account_id,
                    profileId=profile_id,
                    pageToken=next_page))
            else:
                response = API_Retry(service.reports().list(
                    profileId=profile_id, pageToken=next_page))
            next_page = response['nextPageToken']
            for r in response['items']:
                if r['name'] == name:
                    report = r
                    break
    elif report_id:
        if INTERNAL_MODE:
            report = API_Retry(service.reports().get(accountId=account_id,
                                                     profileId=profile_id,
                                                     reportId=report_id))
        else:
            report = API_Retry(service.reports().get(profileId=profile_id,
                                                     reportId=report_id))
        #pprint.PrettyPrinter().pprint(report)

    return report
Example #11
def send_email(auth, email_to, email_from, email_cc, subject, text, html=None, attachment_filename=None, attachment_rows=None):
  if project.verbose: print('SENDING EMAIL', email_to)
  
  service = get_service('gmail', 'v1', auth)
  message = MIMEMultipart('alternative')
  message.set_charset('utf8')

  message['to'] = email_to
  message['cc'] = email_cc
  message['from'] = email_from
  message['subject'] = subject
  text_part = MIMEText(text, 'plain', 'UTF-8')
  message.attach(text_part)

  if html: 
    html_part = MIMEText(html, 'html', 'UTF-8')
    message.attach(html_part)
  
  if attachment_filename and attachment_rows:
    attachment = MIMEBase("text", "csv")
    attachment.set_payload(rows_to_csv(attachment_rows).read())
    attachment.add_header('Content-Disposition', 'attachment',filename=attachment_filename)
    encode_base64(attachment)
    message.attach(attachment)

  #API_Retry(service.users().messages().send(userId='me', body={'raw': base64.urlsafe_b64encode(message.as_string())}))
  API_Retry(service.users().messages().send(userId='me', body={'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}))
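A minimal usage sketch for send_email, assuming Gmail credentials are configured and rows_to_csv is available; all addresses and rows are placeholders.

# Hypothetical send: plain text body, HTML alternative, and a small CSV attachment.
send_email(
    'user',
    'recipient@example.com',
    'sender@example.com',
    '',  # no cc
    'Weekly Report',
    'See attached rows.',
    html='<p>See attached rows.</p>',
    attachment_filename='report.csv',
    attachment_rows=[['Name', 'Count'], ['alpha', 1]])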
Example #12
  def _retry(self, job, retries=6, wait=2):
    """Replaced by core StarThinker retry logic, more resiliant for version 3.3 of the CM API.
   
    All timing parameters are ignored, now handled by replacement function.
    """

    return API_Retry(job)
Example #13
def lineitem_write(auth, rows, dry_run=True):
    """ Writes a list of lineitem configurations to DBM.

  Bulletproofing: https://developers.google.com/bid-manager/v1/lineitems/uploadlineitems

  Args:
    * auth: (string) Either user or service.
    * rows: (iterator) List of lineitems: https://developers.google.com/bid-manager/guides/entity-write/format
    * dry_run: (boolean) If set to True no write will occur, only a test of the upload for errors.
  
  Returns:
    * Results of upload.

  """

    service = get_service('doubleclickbidmanager', API_VERSION, auth)

    header = [s['name'] for s in LineItem_Write_Schema]

    body = {
        "lineItems": '%s\n%s' %
        (','.join(header), rows_to_csv(rows).read()),  # add header row
        "format": 'CSV',
        "dryRun": dry_run
    }

    job = service.lineitems().uploadlineitems(body=body)
    result = API_Retry(job)
    #print result
    return result
Example #14
def job_wait(auth, job):
    if job:
        if project.verbose:
            print('BIGQUERY JOB WAIT:', job['jobReference']['jobId'])

        request = API_BigQuery(auth).jobs().get(
            projectId=job['jobReference']['projectId'],
            jobId=job['jobReference']['jobId'])

        while True:
            sleep(5)
            if project.verbose:
                print('.', end='')
            sys.stdout.flush()
            result = API_Retry(request)
            if 'errors' in result['status']:
                raise Exception('BigQuery Job Error: %s' % ' '.join(
                    [e['message'] for e in result['status']['errors']]))
            elif 'errorResult' in result['status']:
                raise Exception('BigQuery Job Error: %s' %
                                result['status']['errorResult']['message'])
            elif result['status']['state'] == 'DONE':
                if project.verbose:
                    print('JOB COMPLETE:', result['id'])
                break
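A minimal usage sketch for job_wait; the job dict stands in for the response of a previously submitted BigQuery jobs().insert() or jobs().query() call, and the IDs are placeholders.

# Hypothetical wait: block until the referenced BigQuery job reaches DONE or raises on error.
job = {'jobReference': {'projectId': 'my_project', 'jobId': 'job_20200101_123'}}
job_wait('service', job)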
Example #15
def _email_attachments(service, message, attachment_regexp):

    file_filter = re.compile(r'%s' %
                             attachment_regexp) if attachment_regexp else None

    try:
        for part in message['payload']['parts']:

            if part['filename']:

                # filter regexp
                if not file_filter or file_filter.match(part['filename']):

                    if 'data' in part['body']:
                        data = part['body']['data']

                    else:
                        att_id = part['body']['attachmentId']
                        att = API_Retry(
                            service.users().messages().attachments().get(
                                userId='me',
                                messageId=message['id'],
                                id=att_id))
                        data = att['data']

                    file_data = BytesIO(
                        base64.urlsafe_b64decode(data.encode('UTF-8')))
                    yield part['filename'], file_data

    except errors.HttpError as e:
        print('EMAIL ATTACHMENT ERROR:', str(e))
Example #16
def sheets_get(auth, sheet_url_or_name):
    sheet_id = sheets_id(auth, sheet_url_or_name)
    if sheet_id:
        service = get_service('sheets', 'v4', auth)
        return API_Retry(service.spreadsheets().get(spreadsheetId=sheet_id))
    else:
        return None
Example #17
def table_to_schema(auth, project_id, dataset_id, table_id):
    if project.verbose: print('TABLE SCHEMA:', project_id, dataset_id, table_id)
    service = get_service('bigquery', 'v2', auth)
    response = API_Retry(service.tables().get(projectId=project_id,
                                              datasetId=dataset_id,
                                              tableId=table_id))
    return response['schema']
Example #18
def sheets_tab_copy(auth,
                    from_sheet_url_or_name,
                    from_sheet_tab,
                    to_sheet_url_or_name,
                    to_sheet_tab,
                    overwrite=False):
    if project.verbose:
        print('SHEETS COPY', from_sheet_url_or_name, from_sheet_tab,
              to_sheet_url_or_name, to_sheet_tab)
    service = get_service('sheets', 'v4', auth)

    # convert human readable to ids
    from_sheet_id, from_tab_id = sheets_tab_id(auth, from_sheet_url_or_name,
                                               from_sheet_tab)
    to_sheet_id, to_tab_id = sheets_tab_id(auth, to_sheet_url_or_name,
                                           to_sheet_tab)

    # overwrite only if does not exist
    if overwrite or to_tab_id is None:

        # copy tab between sheets, the name changes to be "Copy of [from_sheet_tab]"
        copy_sheet = API_Retry(service.spreadsheets().sheets().copyTo(
            spreadsheetId=from_sheet_id,
            sheetId=from_tab_id,
            body={
                "destinationSpreadsheetId": to_sheet_id,
            }))

        body = {"requests": []}

        # if destination tab exists, delete it
        if to_tab_id:
            body['requests'].append({"deleteSheet": {"sheetId": to_tab_id}})

        # change the copy name to the designated name, remove "Copy of "
        body['requests'].append({
            "updateSheetProperties": {
                "properties": {
                    "sheetId": copy_sheet['sheetId'],
                    "title": to_sheet_tab
                },
                "fields": "title"
            }
        })

        API_Retry(service.spreadsheets().batchUpdate(spreadsheetId=to_sheet_id,
                                                     body=body))
Example #19
def file_find(auth, name, parent = None):
   drive = get_service('drive', 'v3', auth)

   query = "trashed = false and name = '%s'" % name
   if parent: query = "%s and '%s' in parents" % (query, parent)

   try: return (API_Retry(drive.files().list(q=query))['files'] or [None])[0]
   except HttpError: return None
Example #20
def file_delete(auth, name, parent = None):
   drive_file = file_find(auth, name, parent)

   if drive_file:
     drive = get_service('drive', 'v3', auth)
     API_Retry(drive.files().delete(fileId=drive_file['id']))
     return True
   else:
     return False
Example #21
def sheets_clear(auth, sheet_url_or_name, sheet_tab, sheet_range):
  if project.verbose:
    print('SHEETS CLEAR', sheet_url_or_name, sheet_tab, sheet_range)
  service = get_service('sheets', 'v4', auth)
  sheet_id = sheets_id(auth, sheet_url_or_name)
  API_Retry(service.spreadsheets().values().clear(
      spreadsheetId=sheet_id,
      range=sheets_tab_range(sheet_tab, sheet_range),
      body={}))
Example #22
def get_email_messages(auth, email_from, email_to,  subject_regexp=None, link_regexp=None, attachment_regexp=None, download=False, date_min=None, date_max=None):
  if project.verbose: print('GETTING EMAILS')
  service = get_service('gmail', 'v1', auth)
  messages = _email_find(service, email_from, email_to, date_min, date_max)
  subject_filter = re.compile(r'%s' % subject_regexp) if subject_regexp else None
  for message in messages:
    message = API_Retry(service.users().messages().get(userId='me', id=message['id']))
    if subject_filter is None or subject_filter.match(_get_subject(message)):
      yield _email_message(service, message, link_regexp, attachment_regexp, download)
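A minimal usage sketch for get_email_messages, assuming Gmail credentials are configured; the addresses and regular expressions are placeholders.

# Hypothetical scan: last 7 days of matching emails, downloading CSV attachments.
from datetime import date, timedelta

for email in get_email_messages(
        'user',
        'reports@example.com',
        'me@example.com',
        subject_regexp=r'Weekly .* Report',
        attachment_regexp=r'.*\.csv',
        download=True,
        date_min=date.today() - timedelta(days=7),
        date_max=date.today()):
    print(email)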
Example #23
def sheets_write(auth, sheet_url, sheet_tab, sheet_range, data, valueInputOption='RAW'):
  if project.verbose: print('SHEETS WRITE', sheet_url, sheet_tab, sheet_range)
  service = get_service('sheets', 'v4', auth)
  sheet_id = sheets_id(sheet_url)
  range = sheets_tab_range(sheet_tab, sheet_range)
  body = {
    "values": list(data)
  }
  API_Retry(service.spreadsheets().values().update(spreadsheetId=sheet_id, range=range, body=body, valueInputOption=valueInputOption))
Example #24
def report_file(auth,
                account,
                report_id=None,
                name=None,
                timeout=60,
                chunksize=DCM_CHUNK_SIZE):
    """ Retrieves most recent DCM file by name or ID, if in progress, waits for it to complete.

  Bulletproofing: https://developers.google.com/doubleclick-advertisers/v3.2/files/get

  Timeout is in minutes ( retries will happen at 1 minute interval, default total time is 60 minutes )
  If chunksize is set to 0 then the whole file is downloaded at once.

  Args:
    * auth: (string) Either user or service.
    * account: (string) [account:advertiser@profile] token.
    * report_id: (int) ID of DCM report to fetch (either this or name).
    * name: (string) Name of report to fetch (either this or report_id).
    * timeout: (int) Minutes to wait for in progress report before giving up.
    * chunksize: (int) number of bytes to download at a time, for memory constrained systems.

  Returns:
    * (filename, iterator) if file exists and is ready to download in chunks.
    * (filename, file) if file exists and chunking is off.
    * ('report_running.csv', None) if report is in progress.
    * (None, None) if file does not exist.

  """

    account_id, advertiser_id, profile_id = parse_account(auth, account)
    file_json = report_fetch(auth, account, report_id, name, timeout)

    if file_json == False:
        return None, None
    elif file_json == True:
        return 'report_running.csv', None
    else:
        service = get_service('dfareporting',
                              API_VERSION,
                              auth,
                              uri_file=API_URI)
        filename = '%s_%s.csv' % (file_json['fileName'],
                                  file_json['dateRange']['endDate'].replace(
                                      '-', ''))

        # streaming
        if chunksize:
            return filename, media_download(
                service.files().get_media(reportId=file_json['reportId'],
                                          fileId=file_json['id']), chunksize)

        # single object
        else:
            return filename, StringIO(
                API_Retry(service.files().get_media(
                    reportId=file_json['reportId'], fileId=file_json['id'])))
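A minimal usage sketch for report_file, assuming the DCM helpers above are in scope; the account token and report name are placeholders, and the chunks are assumed to be CSV text (this depends on the media_download helper).

# Hypothetical download of the most recent file for a named report.
filename, data = report_file('user', '12345:67890@111', name='My DCM Report')
if data is None:
    print('no file to download yet:', filename)  # missing report or still running
else:
    with open(filename, 'w') as out:
        if hasattr(data, 'read'):
            out.write(data.read())  # chunksize=0 path: a single file-like object
        else:
            for chunk in data:  # default path: iterator of chunks
                out.write(chunk)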
Example #25
def sheets_read(auth, sheet_url_or_name, sheet_tab, sheet_range, retries=10):
    if project.verbose:
        print('SHEETS READ', sheet_url_or_name, sheet_tab, sheet_range)
    service = get_service('sheets', 'v4', auth)
    sheet_id = sheets_id(auth, sheet_url_or_name)
    return API_Retry(service.spreadsheets().values().get(
        spreadsheetId=sheet_id, range=sheets_tab_range(sheet_tab,
                                                       sheet_range)),
                     'values',
                     retries=retries)
Example #26
def sheets_read(auth, sheet_url_or_name, sheet_tab, sheet_range='', retries=10):
  if project.verbose:
    print('SHEETS READ', sheet_url_or_name, sheet_tab, sheet_range)
  service = get_service('sheets', 'v4', auth)
  sheet_id = sheets_id(auth, sheet_url_or_name)
  if sheet_id is None:
    raise OSError('Sheet does not exist: %s' % sheet_url_or_name)
  else:
    return API_Retry(
        service.spreadsheets().values().get(
            spreadsheetId=sheet_id,
            range=sheets_tab_range(sheet_tab, sheet_range)),
        'values',
        retries=retries)
Example #27
def sdf_read(auth, file_types, filter_type, filter_ids, version='3.1'):
    """ Read sdf file from DV360 api

  https://developers.google.com/bid-manager/v1/sdf/download

  Args:
    * auth:  (string) Either user or service.
    * file_types: (list[string]) List of the requested file types 
    * filter_type: (string) Value of the type of filter ids to be expected
    * filter_ids: (list[int]) Filter ids for the api call
    * version: (string) SDF version to request, defaults to '3.1'.

  Returns:
    * Rows of the sdf files requested

  """
    service = get_service('doubleclickbidmanager', API_VERSION, auth)

    body = {
        "fileTypes": file_types,
        "filterType": filter_type,
        "filterIds": filter_ids,
        "version": version
    }

    result = API_Retry(service.sdf().download(body=body))

    # Clean the date field in each row
    for key in result.keys():
        date_idx_list = []
        for count, row in enumerate(csv_to_rows(result.get(key, ''))):
            # Get date fields from header
            if count == 0:
                date_idx_list = _get_idx_sdf_date_fields(row)
                yield row
            # Clean and yield data rows
            else:
                yield _clean_sdf_row(row, date_idx_list)
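A minimal usage sketch for sdf_read; the file type and filter enum strings follow the DBM SDF download API but should be treated as illustrative, and the advertiser ID is a placeholder.

# Hypothetical SDF pull: line item and insertion order files for one advertiser.
for row in sdf_read('user', ['LINE_ITEM', 'INSERTION_ORDER'], 'ADVERTISER_ID', [1234567]):
    print(row)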
Example #28
def get_profile_for_api(auth, account_id):
    """Return a DCM profile ID for the currently supplied credentials.

  Bulletproofing: https://developers.google.com/doubleclick-advertisers/v3.2/userProfiles/get

  Handles cases of superuser, otherwise chooses the first matched profile.
  Allows DCM jobs to only specify account ID, which makes jobs more portable
  between accounts.

  Args:
    * auth: (string) Either user or service.
    * account_id: (int) Account number for which report is retrieved.

  Returns:
    * Is Superuser ( bool ): True if superuser account
    * Profile ID ( int ): profile id to be used to make API call
       
  Raises:
    * If current credentials do not have a profile for this account.

  """

    service = get_service('dfareporting', API_VERSION, auth, uri_file=API_URI)

    profile_admin = None
    profile_network = None

    for p in API_Retry(service.userProfiles().list())['items']:
        p_id = int(p['profileId'])
        a_id = int(p['accountId'])

        # take the first profile for admin
        if '@dcm' in p['userName']:
            profile_admin = p_id
            #elif '@dfa' in p['userName']: profile_admin = p_id
        elif a_id == 2515:
            profile_admin = p_id

        # try to find a network profile if exists
        if a_id == account_id:
            profile_network = p_id
            break

    if profile_admin: return True, profile_admin
    elif profile_network: return False, profile_network
    else:
        raise Exception('Add your user profile to DCM account %s.' %
                        account_id)
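A minimal usage sketch for get_profile_for_api; the account ID is a placeholder, and the superuser flag decides whether subsequent calls also pass accountId, mirroring the INTERNAL_MODE branches above.

# Hypothetical lookup of the profile to use for subsequent DCM API calls.
is_superuser, profile_id = get_profile_for_api('user', 1234567)
kwargs = {'profileId': profile_id}
if is_superuser:
    kwargs['accountId'] = 1234567  # superuser calls also scope by account
print(kwargs)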
Example #29
def report_list(auth):
    """ Lists all the DBM report configurations for the current credentials.

  Bulletproofing: https://developers.google.com/bid-manager/v1/queries/listqueries

  Args:
    * auth: (string) Either user or service.

  Returns:
    * Iterator of JSONs.

  """

    service = get_service('doubleclickbidmanager', API_VERSION, auth)
    job = service.queries().listqueries()
    for query in API_Retry(job, 'queries'):
        yield query
Example #30
def sheets_tab_copy(auth, from_sheet_url, from_sheet_tab, to_sheet_url, to_sheet_tab):
  if project.verbose: print('SHEETS COPY', from_sheet_url, from_sheet_tab, to_sheet_url, to_sheet_tab)
  service = get_service('sheets', 'v4', auth)

  # convert human readable to ids
  from_sheet_id, from_tab_id = sheets_tab_id(auth, from_sheet_url, from_sheet_tab)
  to_sheet_id, to_tab_id = sheets_tab_id(auth, to_sheet_url, to_sheet_tab)

  # overwrite only if does not exist ( PROTECTION )
  if to_tab_id is None:
    # copy tab between sheets ( seriously, the name changes to be "Copy of [from_sheet_tab]" )
    request = API_Retry(service.spreadsheets().sheets().copyTo(spreadsheetId=from_sheet_id, sheetId=from_tab_id, body={
      "destinationSpreadsheetId": to_sheet_id,
    }))

    # change the name back ( remove "Copy of " )
    sheets_tab_rename(auth, to_sheet_url, 'Copy of %s' % from_sheet_tab, to_sheet_tab)