Code example #1
def get_user_permissions(profile_id: str) -> List[Dict]:
  """Get user assigned roles from DV360."""
  service = _get_service()
  page_token = None
  user_profiles = []

  # Page through the results: the API returns at most 1,000 entries per page.
  while True:
    response = service.accountUserProfiles().list(
        profileId=profile_id,
        pageToken=page_token,
    ).execute()
    # The key may be absent when a page has no results.
    account_user_profiles = response.get('accountUserProfiles', [])
    for i, profile in enumerate(account_user_profiles):
      # When debug logging is enabled, show the first 3 entries.
      if i < 3 and logging.get_verbosity() == logging.DEBUG:
        pp = pprint.PrettyPrinter(indent=2)
        logging.debug(pp.pformat(profile))

      user_profiles.append({
          'account_id':
              profile.get('accountId', 'NA'),
          'subaccount_id':
              profile.get('subaccountId', 'NA'),
          'email':
              util.hash_single(profile['email']),
          'advertisers':
              ','.join(profile['advertiserFilter'].get('objectIds', [])),
          'advertiser_status':
              profile['advertiserFilter']['status'],
          'campaigns':
              ','.join(profile['campaignFilter'].get('objectIds', [])),
          'campaign_status':
              profile['campaignFilter']['status'],
      })

    page_token = response.get('nextPageToken')
    if not page_token:
      break

  return user_profiles
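
Several of these examples guard pretty-printed output behind a verbosity check so the formatting cost is only paid when debug logging is actually enabled. A minimal, self-contained sketch of that pattern (the helper name is ours, not from the examples):

from absl import logging
import pprint

def log_debug_payload(payload):
  # absl maps DEBUG to verbosity 1; using `>=` instead of `==` also covers
  # higher --v levels used by vlog().
  if logging.get_verbosity() >= logging.DEBUG:
    logging.debug(pprint.pformat(payload, indent=2))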
Code example #2
  def test_grad_of_jit_compilation_caching(self):
    if not hasattr(self, "assertLogs"):
      raise unittest.SkipTest("test requires assertLogs (python 3)")

    lax.add(1, 2)  # make sure some initial warnings are already printed

    sin = api.jit(np.sin)

    prev_level = logging.get_verbosity()
    try:
      logging.set_verbosity('DEBUG')
      with self.assertLogs(level=logging.DEBUG) as l:
        ans1 = api.grad(sin)(2.)
        ans2 = api.grad(sin)(3.)
    finally:
      logging.set_verbosity(prev_level)
    self.assertLen(l.output, 2)

    self.assertAllClose(ans1, onp.cos(2.), check_dtypes=False)
    self.assertAllClose(ans2, onp.cos(3.), check_dtypes=False)
Code example #3
  def _list_functions_for_serialization(self, unused_serialization_cache):
    """Return a dict of `Function`s of a trackable."""
    functions = {}
    for attribute_name in dir(self):
      # We get the attributes, suppressing warnings and exceptions.
      logging_verbosity = logging.get_verbosity()
      try:
        logging.set_verbosity(logging.FATAL)
        attribute_value = getattr(self, attribute_name, None)
      except Exception:  # pylint: disable=broad-except
        # We really don't want to throw an exception just because some object's
        # attribute accessor is broken.
        attribute_value = None
      finally:
        # We reset the verbosity setting in a `finally` block, to make sure it
        # always happens, even if we make the exception catching above less
        # broad.
        logging.set_verbosity(logging_verbosity)
      if isinstance(attribute_value, (def_function.Function,
                                      defun.ConcreteFunction)):
        functions[attribute_name] = attribute_value
    return functions
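
Code examples #2 and #3 both save the current verbosity, change it, and restore it in a finally block. That pattern factors naturally into a context manager; a minimal sketch (scoped_verbosity is our name, not an absl API):

import contextlib

from absl import logging

@contextlib.contextmanager
def scoped_verbosity(level):
  """Temporarily sets the absl verbosity, restoring the old level on exit."""
  old_level = logging.get_verbosity()
  logging.set_verbosity(level)
  try:
    yield
  finally:
    logging.set_verbosity(old_level)

# Usage, e.g. to silence logging while probing fragile attributes:
#   with scoped_verbosity(logging.FATAL):
#     value = getattr(obj, attribute_name, None)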
Code example #4
File: logutil.py  Project: 50417/DeepFuzzSL
def StartTeeLogsToFile(program_name: str = None,
                       log_dir: str = None,
                       file_log_level: int = logging.DEBUG) -> None:
  """Log messages to file as well as stderr.

  Args:
    program_name: The name of the program.
    log_dir: The directory to log to.
    file_log_level: The minimum verbosity level to log to file.

  Raises:
    FileNotFoundError: If the requested log_dir does not exist.
  """
  if not pathlib.Path(log_dir).is_dir():
    raise FileNotFoundError(f"Log directory not found: '{log_dir}'")
  old_verbosity = logging.get_verbosity()
  logging.set_verbosity(file_log_level)
  logging.set_stderrthreshold(old_verbosity)
  logging.get_absl_handler().start_logging_to_file(program_name, log_dir)
  # start_logging_to_file() touches --logtostderr; keep it False so messages
  # are written to the log file while set_stderrthreshold() above keeps
  # teeing them to stderr.
  FLAGS.logtostderr = False
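
A usage sketch for the function above, assuming flags are parsed by app.run and that the log directory already exists (the program name and path are made up for illustration):

from absl import app
from absl import logging

def main(argv):
  del argv  # Unused.
  StartTeeLogsToFile(program_name='my_tool', log_dir='/tmp/logs')
  logging.info('This message goes to both stderr and the log file.')

if __name__ == '__main__':
  app.run(main)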
Code example #5
def update_report(profile_id: str, report: dict):
  """Update existing report.

  Updates an existing report.

  Args:
    profile_id: The profile id of the user owning the report.
    report: The report object.

  Returns:
    The report object of the updated report.
  """
  service = _get_service()
  response = service.reports().update(
      profileId=profile_id,
      reportId=report['id'],
      body=report,
  ).execute()
  if logging.get_verbosity() == logging.DEBUG:
    pp = pprint.PrettyPrinter(indent=2)
    logging.debug(pp.pformat(response))
  logging.info('Updated report with id: %s for report name: %s', response['id'],
               report['name'])
  return response
Code example #6
  def test_set_verbosity_strings(self):
    old_level = logging.get_verbosity()

    # Lowercase names.
    logging.set_verbosity('debug')
    self.assertEqual(logging.get_verbosity(), logging.DEBUG)
    logging.set_verbosity('info')
    self.assertEqual(logging.get_verbosity(), logging.INFO)
    logging.set_verbosity('warning')
    self.assertEqual(logging.get_verbosity(), logging.WARNING)
    logging.set_verbosity('warn')
    self.assertEqual(logging.get_verbosity(), logging.WARNING)
    logging.set_verbosity('error')
    self.assertEqual(logging.get_verbosity(), logging.ERROR)
    logging.set_verbosity('fatal')
    self.assertEqual(logging.get_verbosity(), logging.FATAL)

    # Uppercase names.
    logging.set_verbosity('DEBUG')
    self.assertEqual(logging.get_verbosity(), logging.DEBUG)
    logging.set_verbosity('INFO')
    self.assertEqual(logging.get_verbosity(), logging.INFO)
    logging.set_verbosity('WARNING')
    self.assertEqual(logging.get_verbosity(), logging.WARNING)
    logging.set_verbosity('WARN')
    self.assertEqual(logging.get_verbosity(), logging.WARNING)
    logging.set_verbosity('ERROR')
    self.assertEqual(logging.get_verbosity(), logging.ERROR)
    logging.set_verbosity('FATAL')
    self.assertEqual(logging.get_verbosity(), logging.FATAL)

    # Integers as strings.
    logging.set_verbosity(str(logging.DEBUG))
    self.assertEqual(logging.get_verbosity(), logging.DEBUG)
    logging.set_verbosity(str(logging.INFO))
    self.assertEqual(logging.get_verbosity(), logging.INFO)
    logging.set_verbosity(str(logging.WARNING))
    self.assertEqual(logging.get_verbosity(), logging.WARNING)
    logging.set_verbosity(str(logging.ERROR))
    self.assertEqual(logging.get_verbosity(), logging.ERROR)
    logging.set_verbosity(str(logging.FATAL))
    self.assertEqual(logging.get_verbosity(), logging.FATAL)

    logging.set_verbosity(old_level)
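
The string round-tripping above works because absl's verbosity levels are plain glog-style integers (larger means more verbose); DEBUG is literally 1, which is why some code guards on that value. The mapping, as defined in absl-py (treat as an assumption to verify against your installed version):

from absl import logging

assert logging.DEBUG == 1
assert logging.INFO == 0
assert logging.WARNING == -1
assert logging.ERROR == -2
assert logging.FATAL == -3
# Values above DEBUG act as vlog() verbosity levels.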
Code example #7
    def _discretization(self, displacement: tf.Tensor,
                        current_tokens: tf.Tensor) -> tf.Tensor:
        r"""Converts a displacement vector to the ids of the best tokens.

    Discretizes the displacement vector into a score over tokens by using the
    method described in Hotflip.
    (Ebrahimi et al. 2018) https://arxiv.org/abs/1712.06751

    \frac{\partial L}{\partial indices} &=
      \frac{\partial L}{\partial embeddings}
      x \frac{\partial embeddings}{\partial indices}
      &= \frac{\partial L}{\partial embeddings} x embeddings

    \frac{\partial L}{\partial embeddings} is our averaged displacement vectors,
    so we just need to multiply the displacement vectors with our embedding
    matrix.

    Args:
      displacement: The expected scaled displacement over sampled replacements.
        <float32>[batch_size, embedding_dimension]
      current_tokens: The indices of the tokens we are currently at.
        <int32>[batch_size, 1]

    Returns:
      A <int32>[batch_size] tensor containing the index of the best replacement
        token for each sentence in the batch.
    """
        # Get dot-product similarity between displacement vectors and embeddings.
        # displacement is a matrix [batch_size, emb_dim] and embeddings is
        # [vocab_size, emb_dim]. embeddings needs to be transposed so that we have
        # [batch_size, emb_dim] @ [emb_dim, vocab_size] = [batch_size, vocab_size].
        displacement_token_similarities = tf.matmul(displacement,
                                                    self._embeddings,
                                                    transpose_b=True)

        if self._discretize_by_cosine:
            displacement_token_similarities = self._normalize_by_magnitudes(
                displacement, displacement_token_similarities)

        # displacement_to_original_similarity is [batch_size, 1].
        displacement_to_original_similarity = tf.gather(
            displacement_token_similarities,
            current_tokens,
            batch_dims=1,
            axis=-1)
        # score_diff is [batch_size, vocab_size] and lowers displacement
        # similarities by the similarity of the current tokens.
        score_diff = (displacement_token_similarities -
                      displacement_to_original_similarity)

        if self._special_token_mask is not None:
            score_diff = tf.where(self._special_token_mask, float('-inf'),
                                  score_diff)
        # This sets scores for current tokens to -inf so they can't be predicted.
        score_diff = self.scatter_helper(
            score_diff, current_tokens,
            tf.fill((score_diff.shape[0], ), float('-inf')))
        # Log the k top scoring replacement tokens.
        if (self._vocab and logging.get_verbosity() == logging.DEBUG
                and tf.executing_eagerly()):
            top_k_indices = tf.nn.top_k(score_diff, k=5)[1].numpy().tolist()
            top_k_tokens = []
            for example in top_k_indices:
                top_k_tokens.append([self._vocab[index] for index in example])
            logging.debug('Top K tokens in discretization:\n %s', top_k_tokens)
        # new_candidates are [batch_size,] and are vocab items most similar to
        # displacement vectors after subtracting the scores of the current tokens.
        new_candidates = tf.argmax(score_diff, axis=-1, output_type=tf.int32)
        # We need new candidates to be [batch_size, 1] for future scatter updates.
        return tf.expand_dims(new_candidates, -1)
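
Stripped of the masking and logging, the core of this discretization is one matrix multiply followed by an argmax. A toy sketch with made-up shapes:

import tensorflow as tf

batch_size, emb_dim, vocab_size = 2, 8, 100
displacement = tf.random.normal([batch_size, emb_dim])
embeddings = tf.random.normal([vocab_size, emb_dim])

# [batch_size, emb_dim] @ [emb_dim, vocab_size] -> [batch_size, vocab_size]
scores = tf.matmul(displacement, embeddings, transpose_b=True)
# Index of the highest-scoring replacement token per example.
best_tokens = tf.argmax(scores, axis=-1, output_type=tf.int32)  # [batch_size]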
Code example #8
def main(argv):
    # note: this project imports the std-lib logging module as `logger` and
    # absl.logging as `logging`
    import tensorflow as tf  # imported here so the env variables are already defined
    tf.get_logger().propagate = False

    # mask errors related to the discovery cache
    logger.getLogger('googleapiclient.discovery_cache').setLevel(logger.ERROR)

    # set level of verbosity
    if FLAGS.verbosity_level == 'DEBUG':
        logging.set_verbosity(logging.DEBUG)
    elif FLAGS.verbosity_level == 'INFO':
        logging.set_verbosity(logging.INFO)
    elif FLAGS.verbosity_level == 'WARNING':
        logging.set_verbosity(logging.WARNING)
    elif FLAGS.verbosity_level == 'ERROR':
        logging.set_verbosity(logging.ERROR)
    elif FLAGS.verbosity_level == 'FATAL':
        logging.set_verbosity(logging.FATAL)
    else:
        logging.set_verbosity(logging.INFO)

    # set level of verbosity for Tensorflow
    if FLAGS.verbosity_level == 'VERBOSE':
        tf.debugging.set_log_device_placement(True)
        tf.autograph.set_verbosity(10, alsologtostdout=False)

    # fmt = "[%(levelname)s %(asctime)s %(filename)s:%(lineno)s] %(message)s"
    fmt = "[%(levelname)s] %(message)s"
    formatter = logger.Formatter(fmt)
    logging.get_absl_handler().setFormatter(formatter)
    logging.get_absl_handler().python_handler.stream = sys.stdout
    logging.set_stderrthreshold(logging.WARNING)

    root_logger = logger.getLogger()
    for handler in root_logger.handlers:
        print("----- handler ", handler)
        print("---------class ", handler.__class__)

    print('logging.get_verbosity()', logging.get_verbosity())

    # print flags
    abseil_flags = [
        'logtostderr', 'alsologtostderr', 'log_dir', 'v', 'verbosity',
        'stderrthreshold', 'showprefixforinfo', 'run_with_pdb',
        'pdb_post_mortem', 'run_with_profiling', 'profile_file',
        'use_cprofile_for_profiling', 'only_check_args', 'flagfile', 'undefok'
    ]
    logging.info('-- Custom flags:')
    for name in list(FLAGS):
        if name not in abseil_flags:
            logging.info('custom flags: {:40} with value: {:50}'.format(
                name, str(FLAGS[name].value)))
    logging.info('\n-- Abseil flags:')
    for name in list(FLAGS):
        if name in abseil_flags:
            logging.info('abseil flags: {:40} with value: {:50}'.format(
                name, str(FLAGS[name].value)))

    if os.environ.get('LOG_FILE_TO_WRITE') is not None:
        logging.info('os.environ[LOG_FILE_TO_WRITE]: {}'.format(
            os.environ['LOG_FILE_TO_WRITE']))
        # split_path = os.environ['LOG_FILE_TO_WRITE'].split('/')
        # logging.get_absl_handler().use_absl_log_file(split_path[-1], '/'.join(split_path[:-1]))

    logging.info(tf.__version__)
    logging.info(tf.keras.__version__)
    logging.info(list(FLAGS))
    logging.debug('flags: \n {}'.format(FLAGS))
    logging.debug('env variables: \n{}'.format(os.environ))
    logging.debug('current dir: {}'.format(os.getcwd()))
    logging.debug('__package__: {}'.format(__package__))
    logging.debug('__name__: {}'.format(__name__))
    logging.debug('__file__: {}'.format(__file__))

    # only for HP tuning!
    if os.environ.get('CLOUD_ML_HP_METRIC_TAG') is not None:
        logging.info('this is a hyperparameter tuning job!')

        # setup the hp flag
        FLAGS.is_hyperparameter_tuning = True
        logging.info('FLAGS.is_hyperparameter_tuning: {}'.format(
            FLAGS.is_hyperparameter_tuning))

        logging.info('os.environ[CLOUD_ML_HP_METRIC_TAG]: {}'.format(
            os.environ['CLOUD_ML_HP_METRIC_TAG']))
        logging.info('os.environ[CLOUD_ML_HP_METRIC_FILE]: {}'.format(
            os.environ['CLOUD_ML_HP_METRIC_FILE']))
        logging.info('os.environ[CLOUD_ML_TRIAL_ID]: {}'.format(
            os.environ['CLOUD_ML_TRIAL_ID']))

        # variable name for hyperparameter tuning
        metric_accuracy = os.environ['CLOUD_ML_HP_METRIC_TAG']
        logging.info('metric accuracy name: {}'.format(metric_accuracy))
    else:
        metric_accuracy = 'NotDefined'

    if os.environ.get('TF_CONFIG') is not None:
        logging.info('os.environ[TF_CONFIG]: {}'.format(
            os.environ['TF_CONFIG']))
    else:
        logging.error("os.environ[TF_CONFIG] doesn't exist!")

    if FLAGS.use_tpu:
        # Check or update the TensorFlow version on the TPU cluster to match the VM's.
        logging.info(
            'setting up TPU: check that TensorFlow version is the same on the VM and on the TPU cluster'
        )
        client_tpu = Client()

        # define TPU strategy before any ops
        client_tpu.configure_tpu_version(tf.__version__,
                                         restart_type='ifNeeded')
        logging.info('setting up TPU: cluster resolver')
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        )
        logging.info('setting up TPU: \n {}'.format(tpu_cluster_resolver))
        logging.info('running on TPU: \n {}'.format(
            tpu_cluster_resolver.cluster_spec().as_dict()['worker']))
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
        strategy = tf.distribute.experimental.TPUStrategy(tpu_cluster_resolver)
    else:
        strategy = tf.distribute.MirroredStrategy()
    logging.info('Number of devices: {}'.format(strategy.num_replicas_in_sync))

    # choose the language model and tokenizer
    MODELS = [(TFBertModel, BertTokenizer, 'bert-base-multilingual-uncased')]
    model_index = 0  # BERT
    # model_class = MODELS[model_index][0]  # i.e. TFBertModel
    # tokenizer_class = MODELS[model_index][1]  # i.e. BertTokenizer
    pretrained_weights = MODELS[model_index][2]  # i.e. 'bert-base-multilingual-uncased'

    # download the pretrained model
    if FLAGS.pretrained_model_dir:
        # download the pretrained model from a GCS bucket
        logging.info('downloading pretrained model!')
        search = re.search('gs://(.*?)/(.*)', FLAGS.pretrained_model_dir)
        if search is not None:
            bucket_name = search.group(1)
            blob_name = search.group(2)
            local_path = '.'
            mu.download_blob(bucket_name, blob_name, local_path)
            pretrained_model_dir = local_path + '/' + blob_name
        else:
            pretrained_model_dir = FLAGS.pretrained_model_dir
    else:
        # download the pretrained model from the internet
        pretrained_model_dir = '.'

    # some sanity checks
    logging.info('Batch size:            {:6}/{:6}'.format(
        FLAGS.batch_size_train, FLAGS.batch_size_eval))
    logging.info('Step per epoch:        {:6}/{:6}'.format(
        FLAGS.steps_per_epoch_train, FLAGS.steps_per_epoch_eval))
    logging.info('Total number of batch: {:6}/{:6}'.format(
        FLAGS.steps_per_epoch_train * (FLAGS.epochs + 1),
        FLAGS.steps_per_epoch_eval * 1))

    # read the TFRecords files: shuffle, map and batch
    train_dataset = tf_bert.build_dataset(FLAGS.input_train_tfrecords,
                                          FLAGS.batch_size_train, 2048)
    valid_dataset = tf_bert.build_dataset(FLAGS.input_eval_tfrecords,
                                          FLAGS.batch_size_eval, 2048)

    # set repeat
    train_dataset = train_dataset.repeat(FLAGS.epochs + 1)
    valid_dataset = valid_dataset.repeat(2)

    # reset all variables used by Keras
    tf.keras.backend.clear_session()

    # create and compile the Keras model in the context of strategy.scope
    with strategy.scope():
        logging.debug('pretrained_model_dir={}'.format(pretrained_model_dir))
        model = tf_bert.create_model(pretrained_weights,
                                     pretrained_model_dir=pretrained_model_dir,
                                     num_labels=FLAGS.num_classes,
                                     learning_rate=FLAGS.learning_rate,
                                     epsilon=FLAGS.epsilon)
    # train the model
    tf_bert.train_and_evaluate(model,
                               num_epochs=FLAGS.epochs,
                               steps_per_epoch=FLAGS.steps_per_epoch_train,
                               train_data=train_dataset,
                               validation_steps=FLAGS.steps_per_epoch_eval,
                               eval_data=valid_dataset,
                               output_dir=FLAGS.output_dir,
                               n_steps_history=FLAGS.n_steps_history,
                               FLAGS=FLAGS,
                               decay_type=FLAGS.decay_type,
                               learning_rate=FLAGS.learning_rate,
                               s=FLAGS.decay_learning_rate,
                               n_batch_decay=FLAGS.n_batch_decay,
                               metric_accuracy=metric_accuracy)
Code example #9
File: logutil.py  Project: monperrus/ProGraML
def StartTeeLogsToFile(
  program_name: str = None,
  log_dir: str = None,
  file_log_level: int = logging.DEBUG,
) -> None:
  """Log messages to file as well as stderr.

  Args:
    program_name: The name of the program.
    log_dir: The directory to log to.
    file_log_level: The minimum verbosity level to log to file.

  Raises:
    FileNotFoundError: If the requested log_dir does not exist.
  """
  if not pathlib.Path(log_dir).is_dir():
    raise FileNotFoundError(f"Log directory not found: '{log_dir}'")
  old_verbosity = logging.get_verbosity()
  logging.set_verbosity(file_log_level)
  logging.set_stderrthreshold(old_verbosity)
  logging.get_absl_handler().start_logging_to_file(program_name, log_dir)
  # start_logging_to_file() touches --logtostderr; keep it False so messages
  # are written to the log file while set_stderrthreshold() above keeps
  # teeing them to stderr.
  FLAGS.logtostderr = False


def StopTeeLogsToFile():
  """Stop logging messages to file as well as stderr."""
  logging.get_absl_handler().flush()
  logging.get_absl_handler().stream = sys.stderr
  FLAGS.logtostderr = True
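
StartTeeLogsToFile and StopTeeLogsToFile pair naturally around a noisy section of code; a hedged sketch (do_work is a stand-in for real work):

StartTeeLogsToFile(program_name='my_tool', log_dir='/tmp/logs')
try:
  do_work()
finally:
  StopTeeLogsToFile()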

Code example #10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import io
import re

from absl import logging
from absl.testing import absltest
import mock

# We do this before our library imports in order to catch any Python stderr
# output they may generate.  We don't want them to; capture and check.
fake_stderr_type = io.BytesIO if bytes is str else io.StringIO

logging.get_verbosity()  # Access --verbosity before flag parsing.
# Access --logtostderr before flag parsing.
logging.get_absl_handler().use_absl_log_file()


class Error(Exception):
    pass


# Pre-initialization (aka "import" / __main__ time) test.  Checked below.
with mock.patch('sys.stderr', new=fake_stderr_type()) as pre_init_mock_stderr:
    # Trigger the notice to stderr once. Infos and above go to stderr.
    logging.debug('Debug message at parse time.')
    logging.info('Info message at parse time.')
    logging.error('Error message at parse time.')
    logging.warning('Warning message at parse time.')
Code example #11
def run_report_and_wait(profile_id: str, report_id: str) -> List:
  """Run the report and wait for it to complete and return the data.

  Runs the report and waits for it to complete and returns the data.

  Args:
    profile_id: The profile id of the account being used.
    report_id: The id of the report to run.

  Returns:
    The report data.
  """
  service = _get_service()
  report_file = service.reports().run(
      profileId=profile_id,
      reportId=report_id,
  ).execute()
  if logging.get_verbosity() == logging.DEBUG:
    pp = pprint.PrettyPrinter(indent=2)
    logging.debug(pp.pformat(report_file))

  # Wait for report file to finish processing.
  # An exponential backoff strategy is used to conserve request quota.
  sleep = 0
  start_time = time.time()
  while True:
    report_file = service.files().get(
        reportId=report_id, fileId=report_file['id']).execute()

    status = report_file['status']
    if status == 'REPORT_AVAILABLE':
      logging.info('File status is %s, ready to download.', status)
      break
    elif status != 'PROCESSING':
      logging.info('File status is %s, processing failed.', status)
      raise ReportRunError
    elif time.time() - start_time > MAX_RETRY_ELAPSED_TIME:
      logging.info('File processing deadline exceeded.')
      raise ReportRunDeadlineExceeded

    sleep = _next_sleep_interval(sleep)
    logging.info('File status is %s, sleeping for %d seconds.', status, sleep)
    time.sleep(sleep)

  bytesio = io.BytesIO()
  # Create a get request.
  request = service.files().get_media(
      reportId=report_id, fileId=report_file['id'])

  # Create a media downloader instance.
  # Optional: adjust the chunk size used when downloading the file.
  downloader = http.MediaIoBaseDownload(bytesio, request, chunksize=CHUNK_SIZE)

  # Execute the get request and download the file.
  download_finished = False
  while not download_finished:
    _, download_finished = downloader.next_chunk()

  csvreader = csv.reader(bytesio.getvalue().decode('utf-8').splitlines())
  report_data = list(csvreader)
  report_data_cleaned = _clean_up(report_data)
  heading = report_data_cleaned[0]
  report_data_objects = []
  for row in report_data_cleaned[1:]:
    report_data_objects.append({h: r for h, r in zip(heading, row)})
  return report_data_objects
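
_next_sleep_interval is not shown in this excerpt. Given the comment about exponential backoff, a plausible, purely hypothetical implementation might look like:

MAX_SLEEP_SECONDS = 300  # Hypothetical cap on the backoff.

def _next_sleep_interval(previous_sleep: int) -> int:
  """Doubles the sleep interval, capped at MAX_SLEEP_SECONDS (hypothetical sketch)."""
  if previous_sleep <= 0:
    return 5  # Hypothetical initial wait.
  return min(previous_sleep * 2, MAX_SLEEP_SECONDS)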