def init_tflog(save_path, use_tpu=False):
    """Set up per-experiment logging under ``save_path``.

    - creates a directory named after the experiment start time inside
      ``save_path``
    - points a ``latest`` symlink at it, so ``cd latest`` always enters the
      most recent experiment
    - (if you use gpus I assume you work on local) attaches a FileHandler to
      TensorFlow's logger so records are persisted to a local log file

    :param save_path: root directory for experiment output
    :param use_tpu: if True, skip local file logging (couldn't find a method
        to log to a file on cloud storage)
    """
    # Capture the start time ONCE. The original called get_starttime() twice;
    # if the two calls straddled a time boundary, the 'latest' symlink would
    # point at a directory other than the one actually created.
    start_time = get_starttime()
    base_path = os.path.join(save_path, start_time)
    log_verbose(folder_path=base_path)
    if not use_tpu:
        import logging

        # Refresh the 'latest' convenience symlink.
        link_path_to_savepath = os.path.join(save_path, "latest")
        if os.path.islink(link_path_to_savepath):
            os.remove(link_path_to_savepath)
        os.symlink(start_time, link_path_to_savepath)

        log_filename = os.path.join(base_path, 'log')
        log_format = '%(asctime)s | %(message)s'
        formatter = logging.Formatter(log_format, datefmt='%m/%d %I:%M:%S %p')
        file_handler = logging.FileHandler(log_filename)
        file_handler.setFormatter(formatter)
        # pylint: disable=no-name-in-module
        from tensorflow.python.platform.tf_logging import get_logger
        get_logger().addHandler(file_handler)
def close_logger():
    """Flush, close, and detach every handler on TensorFlow's logger.

    :return:
    """
    # pylint: disable=no-name-in-module
    # TF 1.12.1 exposes the logger accessor as `_get_logger`; TF >= 1.13.1
    # renamed it to `get_logger`. Alias the old name so either version works.
    try:
        # pylint: disable=no-name-in-module
        from tensorflow.python.platform.tf_logging import \
            _get_logger as get_logger
    except ImportError:
        from tensorflow.python.platform.tf_logging import get_logger
    logger = get_logger()
    # Walk a back-to-front copy so removeHandler can't disturb handlers we
    # have yet to visit.
    for handler in logger.handlers[::-1]:
        try:
            handler.flush()
            handler.close()
            logger.removeHandler(handler)
        except (OSError, ValueError):
            # A handler whose stream is already closed may raise here;
            # cleanup is best-effort, so skip it and keep going.
            pass
def set_logger(file_name=None):
    """Route TensorFlow logging to stdout (and optionally to a file).

    Writing logs to a file if file_name; the handler needs to be closed by
    `close_logger()` after use.

    :param file_name:
    :return:
    """
    # pylint: disable=no-name-in-module
    from tensorflow.python.platform.tf_logging import get_logger
    logger = get_logger()
    tf.logging.set_verbosity(tf.logging.INFO)
    # Drop whatever handlers TF installed by default.
    logger.handlers = []

    def _attach(handler, fmt):
        # One place to pair a handler with its formatter.
        handler.setFormatter(log.Formatter(fmt))
        logger.addHandler(handler)

    # Console output always goes to stdout.
    _attach(log.StreamHandler(sys.stdout), CONSOLE_LOG_FORMAT)
    if file_name:
        # Optional file output; the caller must later invoke close_logger().
        _attach(log.FileHandler(file_name), FILE_LOG_FORMAT)
def __init__(self, flags, darknet=None):
    """Build the TensorFlow graph for the network described by ``flags``.

    :param flags: parsed runtime flags (presumably exposes ``gpu``,
        ``gpu_name``, ``pb_load``, ``meta_load``, ``verbalise`` — confirm
        against the flags definition)
    :param darknet: optionally a pre-built Darknet model; when None it is
        constructed here and build-progress messages are emitted
    """
    FlagIO.__init__(self, subprogram=True)
    # Only announce build progress when we create the Darknet ourselves.
    speak = True if darknet is None else False

    # Setup logging verbosity
    tf_logger = tf_logging.get_logger()
    # remove default StreamHandler and use the tf_handler from utils.flags
    tf_logger.handlers = []
    tf_logger.addHandler(self.tf_logfile)
    # Rotate the TF log file if a previous run left content in it.
    if os.stat(self.tf_logfile.baseFilename).st_size > 0:
        self.tf_logfile.doRollover()
    self.flags = self.read_flags()
    self.io_flags()
    self.ntrain = 0

    # Map verbosity flag onto both the C++ backend (env var) and the Python
    # logging level.
    if self.flags.verbalise:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'
        tf.logging.set_verbosity(tf.logging.DEBUG)
    else:
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        tf.logging.set_verbosity(tf.logging.FATAL)

    if self.flags.pb_load and self.flags.meta_load:
        # Fast path: restore a frozen graph from .pb/.meta instead of
        # building one, then return early.
        self.logger.info('Loading from .pb and .meta')
        self.graph = tf.Graph()
        # NOTE(review): flags.gpu looks like a GPU memory fraction in [0, 1];
        # any positive value selects flags.gpu_name — confirm with callers.
        if flags.gpu > 0.0:
            device_name = flags.gpu_name
        else:
            device_name = None
        with tf.device(device_name):
            with self.graph.as_default() as g:
                self.build_from_pb()
        return

    if darknet is None:
        darknet = Darknet(flags)
        self.ntrain = len(darknet.layers)

    self.darknet = darknet
    args = [darknet.meta, flags]
    self.num_layer = len(darknet.layers)
    self.framework = create_framework(*args)
    self.meta = darknet.meta
    if speak:
        self.logger.info('Building net ...')

    start = time.time()
    self.graph = tf.Graph()
    if flags.gpu > 0.0:
        device_name = flags.gpu_name
    else:
        device_name = None
    with tf.device(device_name):
        with self.graph.as_default():
            self.build_forward()
            self.setup_meta_ops()
    self.logger.info('Finished in {}s'.format(time.time() - start))
def save_weights(self, filepath: Optional[str] = None, overwrite: bool = True) -> Networks:
    r"""Save network weights, forcing ``save_format='tf'``.

    Just copy this function here to fix the `save_format` to 'tf',
    since saving 'h5' will drop certain variables. The trainer object is
    additionally pickled alongside the weights as ``<filepath>.trainer``.

    :param filepath: destination path; defaults to ``self.save_path``
    :param overwrite: whether to overwrite an existing file
    :return: self, for chaining
    """
    if filepath is None:
        filepath = self.save_path
    assert filepath is not None
    with open(filepath + '.trainer', 'wb') as f:
        pickle.dump(self.trainer, f)
    # Silence TF's logger during the save, but guarantee it is re-enabled
    # even if save_weights raises (the original left it disabled on error).
    logging.get_logger().disabled = True
    try:
        super().save_weights(filepath=filepath, overwrite=overwrite,
                             save_format='tf')
    finally:
        logging.get_logger().disabled = False
    return self
def close_logger():
    """Detach and close all handlers registered on the TF logger.

    :return:
    """
    # pylint: disable=no-name-in-module
    from tensorflow.python.platform.tf_logging import get_logger
    logger = get_logger()
    # Iterate a reversed copy so removeHandler cannot skip entries while the
    # live list shrinks underneath us.
    for handler in logger.handlers[::-1]:
        try:
            handler.flush()
            handler.close()
            logger.removeHandler(handler)
        except (OSError, ValueError):
            # Already-closed streams may raise; ignore and keep cleaning up.
            pass
def set_up_logging(
        log_path=None,
        level=logging.INFO,
        formatter='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
    """Configure TensorFlow logging, optionally mirroring records to a file.

    :param log_path: if given, attach a FileHandler writing to this path
    :param level: logging level for the TF logger and the file handler
    :param formatter: format string for file log records
    """
    tf_logging.set_verbosity(level)
    # create file handler which logs even debug messages
    if log_path:
        try:
            fh = logging.FileHandler(log_path)
            fh.setLevel(level)
            fh.setFormatter(logging.Formatter(formatter))
            tf_logger = tf_logging.get_logger()
            tf_logger.addHandler(fh)
            # Lazy %-args instead of eager '%' formatting: the record is
            # only rendered if it is actually emitted.
            tf_logger.info('Saving logs to "%s"', log_path)
            # Stop records from also propagating to the root logger
            # (avoids duplicate console output).
            tf_logger.propagate = False
        except FileNotFoundError:
            # e.g. the log directory does not exist on Cloud ML Engine.
            # Use tf_logging here for consistency with the rest of this
            # function (the original mixed in tf.logging.info).
            tf_logging.info('Cannot save logs to file in Cloud ML Engine')
def setup_tensorflow(device: Union[str, int, Sequence[int], Sequence[str]],
                     allow_growth: bool):
    """Setup tensorflow session according to gpu configuration.

    Args:
        device (Union[str, int, Sequence[int], Sequence[str]]): GPU or list of
            GPUs to run on
        allow_growth (bool): Whether to capture all memory on gpu or grow as
            necessary

    Returns:
        sess (tf.Session): Tensorflow Session object as the default session
    """
    if isinstance(device, int):
        device = str(device)
    elif isinstance(device, (list, tuple)):
        # Accept tuples as well as lists, matching the Sequence annotation
        # (the original rejected tuples with a ValueError).
        # NOTE(review): the joined string contains ", " with a space; CUDA
        # appears to tolerate this, but plain "," is the documented form —
        # confirm before tightening.
        device = ', '.join([str(d) for d in device])
    elif not isinstance(device, str):
        raise ValueError(
            "Unrecognized device type. Expected int, str, or list. "
            "Received {}.".format(type(device)))
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = device
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"  # disable tensorflow info logging
    tf.logging.set_verbosity(tf.logging.WARN)

    from tensorflow.python.platform import tf_logging
    try:
        # tensorflow == 1.13
        tf_logging.get_logger().propagate = False
    except AttributeError:
        # tensorflow <= 1.12
        tf_logging._get_logger().propagate = False

    gpu_options = tf.GPUOptions(allow_growth=allow_growth)
    conf = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
    # Reuse an existing default session if one is already active; otherwise
    # create one and install it as the default.
    sess = tf.get_default_session()
    if sess is None:
        sess = tf.Session(config=conf)
    sess.__enter__()  # type: ignore
    np.set_printoptions(suppress=True)
    return sess
def log_cleaning(hide_deprecation_warnings=False):
    """Tidy up Python/TF log output.

    :param hide_deprecation_warnings: when True, silence Python warnings and
        TensorFlow's deprecation notices.
    """
    if hide_deprecation_warnings:
        warnings.simplefilter("ignore")
        from tensorflow.python.util import deprecation
        from tensorflow.python.util import deprecation_wrapper
        # Flip TF's internal switches; there is no public API for this.
        deprecation._PRINT_DEPRECATION_WARNINGS = False
        deprecation_wrapper._PER_MODULE_WARNING_LIMIT = 0

    from tensorflow.python.platform import tf_logging
    # Keep TF and root-logger records from propagating upward (prevents
    # duplicated output), and apply one uniform format to root handlers.
    tf_logging.get_logger().propagate = False
    root = _logging.getLogger()
    root.propagate = False
    uniform = _logging.Formatter('[%(levelname)s] %(message)s')
    for h in root.handlers:
        h.setFormatter(uniform)
def set_logger(file_name=None):
    """Reset TF logging handlers: console always, plus an optional file.

    Writing logs to a file if file_name; the handler needs to be closed by
    `close_logger()` after use.

    :param file_name:
    :return:
    """
    # pylint: disable=no-name-in-module
    # TF 1.12.1 names the logger accessor `_get_logger`; TF >= 1.13.1 renamed
    # it to `get_logger`. Try the old name first, fall back to the new one,
    # so the code does not break on either version.
    try:
        # pylint: disable=no-name-in-module
        from tensorflow.python.platform.tf_logging import \
            _get_logger as get_logger
    except ImportError:
        from tensorflow.python.platform.tf_logging import get_logger

    logger = get_logger()
    tf.logging.set_verbosity(tf.logging.INFO)
    logger.handlers = []

    # Console handler first...
    console = log.StreamHandler(sys.stdout)
    console.setFormatter(log.Formatter(CONSOLE_LOG_FORMAT))
    logger.addHandler(console)

    # ...then, optionally, a file handler (close it via close_logger()).
    if not file_name:
        return
    file_handler = log.FileHandler(file_name)
    file_handler.setFormatter(log.Formatter(FILE_LOG_FORMAT))
    logger.addHandler(file_handler)
from __future__ import print_function import argparse import os.path import sys import tensorflow.compat.v1 as tf tf.disable_v2_behavior() from tensorflow.python.ops import gen_audio_ops as audio_ops import input_data import models from tensorflow.python.framework import graph_util from tensorflow.python.platform import tf_logging as logging logging.get_logger().propagate = False FLAGS = None def create_inference_graph( wanted_words, sample_rate, nchannels, clip_duration_ms, clip_stride_ms, representation, window_size_ms, window_stride_ms, nwindows, dct_coefficient_count, filterbank_channel_count, model_architecture, filter_counts, filter_sizes, final_filter_len, dropout_prob, batch_size, dilate_after_layer, stride_after_layer, connection_type, silence_percentage, unknown_percentage): """Creates an audio model with the nodes needed for inference. Uses the supplied arguments to create a model, and inserts the input and output nodes that are needed to use the graph for inference.