def read_config(p: Path = config_path):
    """Read settings from the JSON config file, falling back to defaults on any error."""
    try:
        with open(p.absolute(), "r") as f:
            return Settings(**json.load(f))
    except Exception as e:
        logger.error(f"Issue reading settings.json, {e}")
        return Settings(**default_config)

class SettingsDictAction(argparse.Action):
    """Handles the settings dict with argparse."""

    # Maps a preset name given on the command line to a pre-built Settings object.
    VALUE_DICT = {
        "x264": Settings(),
        "nhevc": Settings(
            width=1920,
            height=1080,
            extension="_1080p",
            parameter=EncodingParameters(
                resolution=[
                    "1920x1080",
                    "1600x900",
                    "1280x720",
                ],
                qp=[24, 29, 34, 40, 46],
                gop_l=[1, 3, 6, 10, 20, 40, 120],
                rate=[30, 15, 3],
            ),
            ffmpeg_config=lambda params: f"hevc_nvenc -g {params.gop_len} -qp {params.qp}",
        ),
    }

    def __call__(self, arg_parser, namespace, values, option_string=None):
        setattr(namespace, self.dest, self.VALUE_DICT.get(values))

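# A minimal usage sketch, not part of the original module: since SettingsDictAction
# subclasses argparse.Action, it is wired in through add_argument's `action=` keyword.
# The parser and the flag name "--preset" below are assumptions for illustration.
_parser = argparse.ArgumentParser()
_parser.add_argument(
    "--preset",
    action=SettingsDictAction,
    choices=list(SettingsDictAction.VALUE_DICT),
    default=Settings(),
    help="Encoder preset; the action stores the mapped Settings object, not the string.",
)
_args = _parser.parse_args(["--preset", "nhevc"])
assert _args.preset is SettingsDictAction.VALUE_DICT["nhevc"]
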
def _load_model():
    # These imports are slow - lazy import.
    import tensorflow as tf
    from data import Vocab
    from model import Hps, Settings, SummarizationModel

    global _settings, _hps, _vocab, _sess, _model

    # Define settings and hyperparameters
    _settings = Settings(
        embeddings_path='',
        log_root='',
        trace_path='',  # traces/traces_blog',
    )
    _hps = Hps(
        # parameters important for decoding
        attn_only_entities=False,
        batch_size=_beam_size,
        copy_only_entities=False,
        emb_dim=128,
        enc_hidden_dim=200,
        dec_hidden_dim=300,
        max_dec_steps=1,
        max_enc_steps=400,
        mode='decode',
        output_vocab_size=20000,
        restrictive_embeddings=False,
        save_matmul=False,
        tied_output=True,
        two_layer_lstm=True,
        # other parameters
        adagrad_init_acc=.1,
        adam_optimizer=True,
        copy_common_loss_wt=0.,
        cov_loss_wt=0.,
        high_attn_loss_wt=0.,
        lr=.15,
        max_grad_norm=2.,
        people_loss_wt=0.,
        rand_unif_init_mag=.02,
        scatter_loss_wt=0.,
        sharp_loss_wt=0.,
        trunc_norm_init_std=1e-4,
    )

    # Define model
    _vocab = Vocab(_vocab_path, _vocab_size)
    _model = SummarizationModel(_settings, _hps, _vocab)
    _model.build_graph()

    # Load model from disk
    saver = tf.train.Saver()
    config = tf.ConfigProto(
        allow_soft_placement=True,
        # intra_op_parallelism_threads=1,
        # inter_op_parallelism_threads=1,
    )
    _sess = tf.Session(config=config)
    ckpt_state = tf.train.get_checkpoint_state(_model_dir)
    saver.restore(_sess, ckpt_state.model_checkpoint_path)

def _get_namespace(domain):
    """Resolve the namespace for a domain via the cached global mapping, defaulting to the domain itself."""
    global mapping
    if mapping is None:
        namespace_manager.set_namespace('global')
        settings = ndb.Key(Settings, '_settings').get()
        if not settings:
            settings = Settings(id='_settings')
            settings.set_mapping({})
            settings.put()
        mapping = settings.get_mapping_dict()
    return mapping.get(domain, domain)

def ensure_global_settings():
    current_namespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace('global')
    settings = ndb.Key(Settings, '_settings').get()
    settings_updated = False
    if not settings:
        logging.debug('Settings not found. Init.')
        settings = Settings(id='_settings')
    for s, v in default_settings.items():
        if not hasattr(settings, s):
            setattr(settings, s, v)
            settings_updated = True
    if settings_updated:
        settings.put()
    namespace_manager.set_namespace(current_namespace)
    logging.info('Running with settings %s', settings)

def __getattr__(self, name):
    namespace = namespace_manager.get_namespace()
    now = time.time()
    # Refresh from the datastore at most once every 300 seconds per namespace.
    if not self.settings or now >= _last_config_refreshes.get(namespace, now - 300) + 300:
        self.settings = ndb.Key(Settings, '_settings').get(use_cache=False, use_memcache=False)
        if not self.settings:
            self.settings = Settings(id='_settings')
        settings_updated = False
        for s, v in default_settings.items():
            if not hasattr(self.settings, s):
                setattr(self.settings, s, v)
                settings_updated = True
        if settings_updated:
            self.settings.put()
        logging.info('Settings refreshed.')
        _last_config_refreshes[namespace] = now
    return getattr(self.settings, name)

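# Design note (hedged; the enclosing wrapper class is not shown in this snippet): because
# the datastore lookup lives inside __getattr__, any attribute read on the wrapper
# transparently returns the corresponding Settings property, while _last_config_refreshes
# caps the cost to one uncached ndb get per namespace per 300-second window.
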
def main(unused_argv):
    if len(unused_argv) != 1:  # prints a message if you've entered flags incorrectly
        raise Exception("Problem with flags: %s" % unused_argv)

    tf.logging.set_verbosity(tf.logging.INFO)  # choose what level of logging you want
    tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))

    # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary
    FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)
    if not os.path.exists(FLAGS.log_root):
        if FLAGS.mode == "train":
            os.makedirs(FLAGS.log_root)
        else:
            raise Exception(
                "Logdir %s doesn't exist. Run in train mode to create it." % (FLAGS.log_root)
            )

    vocab_size = 50000 if FLAGS.restrictive_embeddings else 20000
    if FLAGS.output_vocab_size == 0:
        FLAGS.output_vocab_size = vocab_size
    assert FLAGS.output_vocab_size <= vocab_size
    vocab = Vocab(FLAGS.vocab_path, vocab_size)  # create a vocabulary

    # If in decode mode, set batch_size = beam_size
    # Reason: in decode mode, we decode one example at a time.
    # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch
    # of these hypotheses.
    if FLAGS.mode == 'decode':
        FLAGS.batch_size = FLAGS.beam_size

    # If single_pass=True, check we're in decode mode
    if FLAGS.single_pass and FLAGS.mode != 'decode':
        raise Exception("The single_pass flag should only be True in decode mode")
    if FLAGS.restrictive_embeddings and not FLAGS.embeddings_path:
        raise Exception("Cannot use restrictive embeddings with no pretrained embeddings")

    # Partition the flags into Settings fields and Hps fields.
    settings_dict = {}
    hps_dict = {}
    for key, val in FLAGS.__flags.iteritems():  # for each flag
        if key in Settings._fields:
            settings_dict[key] = val
        elif key in Hps._fields:
            hps_dict[key] = val
    settings = Settings(**settings_dict)
    hps = Hps(**hps_dict)

    # Create a batcher object that will create minibatches of data
    batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass)

    tf.set_random_seed(111)  # a seed value for randomness

    if hps.mode == 'train':
        print "creating model..."
        model = SummarizationModel(settings, hps, vocab)
        setup_training(model, batcher, hps, vocab_size)
    elif hps.mode == 'eval':
        model = SummarizationModel(settings, hps, vocab)
        run_eval(model, batcher, vocab)
    elif hps.mode == 'decode':
        # The model is configured with max_dec_steps=1 because we only ever run one step of the
        # decoder at a time (to do beam search). Note that the batcher is initialized with
        # max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries.
        decode_model_hps = hps._replace(max_dec_steps=1)
        model = SummarizationModel(settings, decode_model_hps, vocab)
        decoder = BeamSearchDecoder(model, batcher, vocab)
        # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)
        decoder.decode()
    else:
        raise ValueError("The 'mode' flag must be one of train/eval/decode")

def write_config(p: Path = config_path, data: dict = None):
    """Write settings to the JSON config file and return them as a Settings object."""
    data = data or default_config
    with open(p.absolute(), "w") as f:
        json.dump(data, f)
    return Settings(**data)

def load_settings(path=None):
    with open((path or config_path).absolute(), "r") as f:
        return Settings(**json.load(f))

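# A round-trip sketch using the three helpers above (assumes they share one module
# along with the surrounding config_path / default_config globals).
settings = read_config()    # falls back to Settings(**default_config) on any error
settings = write_config()   # persists default_config and returns it as Settings
settings = load_settings()  # re-reads the file that was just written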