def _read_config_files(self, run_paths):
  """Reads and returns the projector config files in every run directory."""
  configs = {}
  config_fpaths = {}
  for run_name, logdir in run_paths.items():
    config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
    if not file_io.file_exists(config_fpath):
      # Skip runs that have no config file.
      continue
    # Read the config file.
    file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
    config = ProjectorConfig()
    text_format.Merge(file_content, config)
    if not config.model_checkpoint_path:
      # See if you can find a checkpoint file in the logdir.
      ckpt_path = latest_checkpoint(logdir)
      if not ckpt_path:
        # Or in the parent of logdir.
        ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))
        if not ckpt_path:
          logging.warning('Cannot find model checkpoint in %s', logdir)
          continue
      config.model_checkpoint_path = ckpt_path
    # Sanity check for the checkpoint file.
    if not file_io.file_exists(config.model_checkpoint_path):
      logging.warning('Checkpoint file %s not found',
                      config.model_checkpoint_path)
      continue
    configs[run_name] = config
    config_fpaths[run_name] = config_fpath
  return configs, config_fpaths
def _read_latest_config_files(self, run_path_pairs):
  """Reads and returns the projector config files in every run directory."""
  configs = {}
  config_fpaths = {}
  for run_name, logdir in run_path_pairs:
    config = ProjectorConfig()
    config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
    if file_io.file_exists(config_fpath):
      file_content = file_io.read_file_to_string(config_fpath)
      text_format.Merge(file_content, config)

    has_tensor_files = False
    for embedding in config.embeddings:
      if embedding.tensor_path:
        has_tensor_files = True
        break

    if not config.model_checkpoint_path:
      # See if you can find a checkpoint file in the logdir.
      ckpt_path = _find_latest_checkpoint(logdir)
      if not ckpt_path and not has_tensor_files:
        continue
      if ckpt_path:
        config.model_checkpoint_path = ckpt_path

    # Sanity check for the checkpoint file.
    if (config.model_checkpoint_path and
        not checkpoint_exists(config.model_checkpoint_path)):
      logging.warning('Checkpoint file %s not found',
                      config.model_checkpoint_path)
      continue
    configs[run_name] = config
    config_fpaths[run_name] = config_fpath
  return configs, config_fpaths
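# `_find_latest_checkpoint` is called above but not defined in this listing.
# A minimal sketch, assuming tf.train's latest_checkpoint(): look in the run
# directory first, then fall back to its parent, mirroring the inline fallback
# logic in _read_config_files. The implementation here is an assumption, not
# the verbatim helper.
def _find_latest_checkpoint(dir_path):
  ckpt_path = latest_checkpoint(dir_path)
  if not ckpt_path:
    # Also check the parent of dir_path.
    ckpt_path = latest_checkpoint(os.path.join(dir_path, os.pardir))
  return ckpt_path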
def _read_config_files(self, run_paths, logdir):
  """Reads and returns the projector config files in every run directory."""
  # If there are no summary event files, the projector can still work,
  # treating the `logdir` as the model checkpoint directory.
  if not run_paths:
    run_paths['.'] = logdir

  configs = {}
  config_fpaths = {}
  for run_name, logdir in run_paths.items():
    config = ProjectorConfig()
    config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
    if file_io.file_exists(config_fpath):
      file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
      text_format.Merge(file_content, config)

    has_tensor_files = False
    for embedding in config.embeddings:
      if embedding.tensor_path:
        has_tensor_files = True
        break

    if not config.model_checkpoint_path:
      # See if you can find a checkpoint file in the logdir.
      ckpt_path = latest_checkpoint(logdir)
      if not ckpt_path:
        # Or in the parent of logdir.
        ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))
      if not ckpt_path and not has_tensor_files:
        logging.warning('Cannot find model checkpoint in %s', logdir)
        continue
      if ckpt_path:
        config.model_checkpoint_path = ckpt_path

    # Sanity check for the checkpoint file.
    if (config.model_checkpoint_path and
        not checkpoint_exists(config.model_checkpoint_path)):
      logging.warning('Checkpoint file %s not found',
                      config.model_checkpoint_path)
      continue
    configs[run_name] = config
    config_fpaths[run_name] = config_fpath
  return configs, config_fpaths
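# `checkpoint_exists` is referenced above but not defined in this listing.
# A minimal sketch, assuming file_io.get_matching_files(): a V2 checkpoint
# path is only a prefix (the data lives in `<prefix>.index` and
# `<prefix>.data-*` shards), so a plain file_exists() check would wrongly
# report V2 checkpoints as missing.
def checkpoint_exists(checkpoint_path):
  # True if the V1 checkpoint file itself, or any V2 file starting with the
  # prefix, exists.
  return bool(file_io.get_matching_files(checkpoint_path) or
              file_io.get_matching_files(checkpoint_path + '.index'))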
def _latest_checkpoints_changed(configs, run_path_pairs):
  """Returns true if the latest checkpoint has changed in any of the runs."""
  for run_name, logdir in run_path_pairs:
    if run_name not in configs:
      config = ProjectorConfig()
      config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        file_content = file_io.read_file_to_string(config_fpath)
        text_format.Merge(file_content, config)
    else:
      config = configs[run_name]

    # See if you can find a checkpoint file in the logdir.
    ckpt_path = _find_latest_checkpoint(logdir)
    if not ckpt_path:
      continue
    if config.model_checkpoint_path != ckpt_path:
      return True
  return False
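# Hypothetical caller showing how _latest_checkpoints_changed is meant to be
# used: poll, and re-read the config files only when a newer checkpoint has
# appeared in some run. The method name and attributes are illustrative
# assumptions, not part of the plugin code above.
def _maybe_reload_configs(self, run_path_pairs):
  if _latest_checkpoints_changed(self._configs, run_path_pairs):
    self._configs, self._config_fpaths = self._read_latest_config_files(
        run_path_pairs)
  return self._configs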
def _GenerateProjectorTestData(self, run_path):
  # Write a projector config file in the run directory.
  config_path = os.path.join(run_path, 'projector_config.pbtxt')
  config = ProjectorConfig()
  config_pbtxt = text_format.MessageToString(config)
  with tf.gfile.GFile(config_path, 'w') as f:
    f.write(config_pbtxt)

  # Write a checkpoint with some dummy variables.
  with tf.Graph().as_default():
    sess = tf.Session()
    checkpoint_path = os.path.join(run_path, 'model')
    tf.get_variable('var1', [1, 2], initializer=tf.constant_initializer(6.0))
    tf.get_variable('var2', [10, 10])
    tf.get_variable('var3', [100, 100])
    sess.run(tf.initialize_all_variables())
    saver = tf.train.Saver()
    saver.save(sess, checkpoint_path)
def _GenerateProjectorTestData(self):
  config_path = os.path.join(self.log_dir, 'projector_config.pbtxt')
  config = ProjectorConfig()
  embedding = config.embeddings.add()
  # Add an embedding by its canonical tensor name.
  embedding.tensor_name = 'var1:0'
  config_pbtxt = text_format.MessageToString(config)
  with gfile.GFile(config_path, 'w') as f:
    f.write(config_pbtxt)

  # Write a checkpoint with some dummy variables.
  with ops.Graph().as_default():
    sess = session.Session()
    checkpoint_path = os.path.join(self.log_dir, 'model')
    variable_scope.get_variable(
        'var1', [1, 2], initializer=init_ops.constant_initializer(6.0))
    variable_scope.get_variable('var2', [10, 10])
    variable_scope.get_variable('var3', [100, 100])
    sess.run(variables.global_variables_initializer())
    saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
    saver.save(sess, checkpoint_path)
def _GenerateProjectorTestData(self, run_path):
  # Write a projector config file in the run directory.
  config_path = os.path.join(run_path, 'projector_config.pbtxt')
  config = ProjectorConfig()
  embedding = config.embeddings.add()
  # Add an embedding by its canonical tensor name.
  embedding.tensor_name = 'var1:0'
  config_pbtxt = text_format.MessageToString(config)
  with tf.gfile.GFile(config_path, 'w') as f:
    f.write(config_pbtxt)

  # Write a checkpoint with some dummy variables.
  with tf.Graph().as_default():
    sess = tf.Session()
    checkpoint_path = os.path.join(run_path, 'model')
    tf.get_variable('var1', [1, 2], initializer=tf.constant_initializer(6.0))
    tf.get_variable('var2', [10, 10])
    tf.get_variable('var3', [100, 100])
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(write_version=tf.train.SaverDef.V1)
    saver.save(sess, checkpoint_path)
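# Hypothetical companion assertion for the test data above: read the saved
# checkpoint back with tf.train.NewCheckpointReader and confirm the dummy
# variables round-trip with their declared shapes. The method name is an
# assumption for illustration.
def _AssertProjectorTestData(self, run_path):
  reader = tf.train.NewCheckpointReader(os.path.join(run_path, 'model'))
  var_map = reader.get_variable_to_shape_map()
  self.assertEqual(var_map['var1'], [1, 2])
  self.assertEqual(var_map['var2'], [10, 10])
  self.assertEqual(var_map['var3'], [100, 100])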