def test_help_non_main_module_key_flags(self):
  flags.DEFINE_string(
      'non_main_module_flag',
      'default',
      'help',
      module_name='other.module',
      flag_values=self._absl_flags)
  flags.declare_key_flag('non_main_module_flag', flag_values=self._absl_flags)
  parser = argparse_flags.ArgumentParser(
      inherited_absl_flags=self._absl_flags)
  help_message = parser.format_help()
  # Main module key flags are printed in the help message, even if the flag
  # is defined in another module.
  self.assertIn('non_main_module_flag', help_message)
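The behavior this test asserts can also be seen in a standalone script. A minimal sketch (the flag name and module_name below are illustrative, not taken from the test):

from absl import app, flags

# Define a flag on behalf of another module, then declare it as a key flag
# of this (the main) module.
flags.DEFINE_string('other_module_flag', 'default', 'Defined elsewhere.',
                    module_name='other.module')
flags.declare_key_flag('other_module_flag')


def main(argv):
  del argv


if __name__ == '__main__':
  # Running with --helpshort lists other_module_flag under the main module,
  # even though it was defined with module_name='other.module'.
  app.run(main)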
def define_flags():
  """Define a command line flag for each ParamSpec in _DEFAULT_PARAMS."""
  define_flag = {
      'boolean': flags.DEFINE_boolean,
      'float': flags.DEFINE_float,
      'integer': flags.DEFINE_integer,
      'string': flags.DEFINE_string,
  }
  for name, param_spec in six.iteritems(_DEFAULT_PARAMS):
    if param_spec.flag_type not in define_flag:
      raise ValueError('Unknown flag_type %s' % param_spec.flag_type)
    else:
      define_flag[param_spec.flag_type](name, param_spec.default_value,
                                        param_spec.description)
      flags.declare_key_flag(name)
from absl import app, flags, logging

from src import datasets, training, export, inference

flags.declare_key_flag("image_size")
flags.declare_key_flag("batch_size")
flags.adopt_module_key_flags(training)
flags.adopt_module_key_flags(export)
flags.adopt_module_key_flags(inference)

flags.DEFINE_enum("action", None,
                  ["training", "export", "infer", "train_and_export"],
                  "What the program needs to do")
flags.mark_flag_as_required("action")

FLAGS = flags.FLAGS


def main(argv):
    del argv
    if "training" in FLAGS.action:
        training.training()
    if "export" in FLAGS.action:
        export.export()
    if "infer" in FLAGS.action:
        inference.load_model_and_infer_dir()


if __name__ == '__main__':
    app.run(main)
        return True
    return not flags_dict['steps_to_train'] and flags_dict['filter_amount'] == 1.0


@flags.multi_flags_validator(
    ['use_bt', 'cbt_project', 'cbt_instance', 'cbt_table'],
    message='Cloud Bigtable configuration flags not correct')
def _bt_checker(flags_dict):
    if not flags_dict['use_bt']:
        return True
    return (flags_dict['cbt_project']
            and flags_dict['cbt_instance']
            and flags_dict['cbt_table'])


# From dual_net.py
flags.declare_key_flag('work_dir')
flags.declare_key_flag('train_batch_size')
flags.declare_key_flag('num_tpu_cores')
flags.declare_key_flag('use_tpu')

FLAGS = flags.FLAGS


class EchoStepCounterHook(tf.train.StepCounterHook):
    """A hook that logs steps per second."""

    def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
        s_per_sec = elapsed_steps / elapsed_time
        logging.info("{}: {:.3f} steps per second".format(global_step, s_per_sec))
        super()._log_and_record(elapsed_steps, elapsed_time, global_step)
from cnn_quantization.tf_cnn_benchmarks import flags
from cnn_quantization.tf_cnn_benchmarks.cnn_util import log_fn
from tensorflow.python.ops import control_flow_ops

absl_flags.DEFINE_integer(
    'iters_per_step', 5,
    'Number of iterations to run all-reduce for, per step. Every step, a '
    'session will be run on a Graph that contains this many copies of the '
    'all-reduce. The copies are run sequentially. Setting this above 1 is '
    'useful to lower the overhead of starting the session run, running the '
    'VariableV2 ops at the start of the step, etc.')

flags.define_flags()
for name in flags.param_specs.keys():
  absl_flags.declare_key_flag(name)


def get_var_shapes(model):
  """Returns the list of variable shapes for a tf_cnn_benchmarks Model."""
  with tf.Graph().as_default():
    # The variable shapes do not depend on the batch size.
    images = tf.placeholder(tf.float32, model.get_input_shapes('train')[0])
    model.build_network([images])
    return [[int(d) for d in v.shape.dims] for v in tf.trainable_variables()]


def all_reduce(all_device_tensors, variable_mgr):
  """Performs a single batch all-reduce.
def test_write_help_in_xmlformat(self):
  fv = flags.FlagValues()
  # Since these flags are defined by the top module, they are all key.
  flags.DEFINE_integer('index', 17, 'An integer flag', flag_values=fv)
  flags.DEFINE_integer('nb_iters', 17, 'An integer flag',
                       lower_bound=5, upper_bound=27, flag_values=fv)
  flags.DEFINE_string('file_path', '/path/to/my/dir', 'A test string flag.',
                      flag_values=fv)
  flags.DEFINE_boolean('use_gpu', False, 'Use gpu for performance.',
                       flag_values=fv)
  flags.DEFINE_enum('cc_version', 'stable', ['stable', 'experimental'],
                    'Compiler version to use.', flag_values=fv)
  flags.DEFINE_list('files', 'a.cc,a.h,archive/old.zip', 'Files to process.',
                    flag_values=fv)
  flags.DEFINE_list('allow_users', ['alice', 'bob'], 'Users with access.',
                    flag_values=fv)
  flags.DEFINE_spaceseplist('dirs', 'src libs bins', 'Directories to create.',
                            flag_values=fv)
  flags.DEFINE_multi_string('to_delete', ['a.cc', 'b.h'], 'Files to delete',
                            flag_values=fv)
  flags.DEFINE_multi_integer('cols', [5, 7, 23], 'Columns to select',
                             flag_values=fv)
  flags.DEFINE_multi_enum('flavours', ['APPLE', 'BANANA'],
                          ['APPLE', 'BANANA', 'CHERRY'],
                          'Compilation flavour.', flag_values=fv)
  # Define a few flags in a different module.
  module_bar.define_flags(flag_values=fv)
  # And declare only a few of them to be key. This way, we have
  # different kinds of flags, defined in different modules, and not
  # all of them are key flags.
  flags.declare_key_flag('tmod_bar_z', flag_values=fv)
  flags.declare_key_flag('tmod_bar_u', flag_values=fv)

  # Generate flag help in XML format in the StringIO sio.
  sio = io.StringIO() if six.PY3 else io.BytesIO()
  fv.write_help_in_xml_format(sio)

  # Check that we got the expected result.
  expected_output_template = EXPECTED_HELP_XML_START
  main_module_name = sys.argv[0]
  module_bar_name = module_bar.__name__

  if main_module_name < module_bar_name:
    expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE
    expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
  else:
    expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MODULE_BAR
    expected_output_template += EXPECTED_HELP_XML_FOR_FLAGS_FROM_MAIN_MODULE

  expected_output_template += EXPECTED_HELP_XML_END

  # XML representation of the whitespace list separators.
  whitespace_separators = _list_separators_in_xmlformat(string.whitespace,
                                                        indent=' ')
  expected_output = (
      expected_output_template % {
          'basename_of_argv0': os.path.basename(sys.argv[0]),
          'usage_doc': sys.modules['__main__'].__doc__,
          'main_module_name': main_module_name,
          'module_bar_name': module_bar_name,
          'whitespace_separators': whitespace_separators})

  actual_output = sio.getvalue()
  self.assertMultiLineEqual(expected_output, actual_output)

  # Also check that our result is valid XML. minidom.parseString
  # throws an xml.parsers.expat.ExpatError in case of an error.
  xml.dom.minidom.parseString(actual_output)
    'Number of games to play concurrently on each selfplay '
    'thread. Inferences from a thread\'s concurrent games are '
    'batched up and evaluated together. Increasing '
    'concurrent_games_per_thread can help improve GPU or '
    'TPU utilization, especially for small models.')

flags.DEFINE_integer('mlperf_virtual_losses', -1,
                     'Number of virtual losses when running tree search')

flags.DEFINE_float(
    'mlperf_gating_win_rate', -1.0,
    'Win pct against the target model to define a converged model.')

flags.DEFINE_integer(
    'mlperf_eval_games', -1,
    'Number of games to play against the target when determining the win pct.')

flags.declare_key_flag('window_size')
flags.declare_key_flag('work_dir')
flags.declare_key_flag('tpu_name')
flags.declare_key_flag('train_batch_size')
flags.declare_key_flag('l2_strength')
flags.declare_key_flag('filter_amount')
flags.declare_key_flag('lr_boundaries')
flags.declare_key_flag('lr_rates')

FLAGS = flags.FLAGS


# Training loop state.
class State:
    def __init__(self, model_num):
        self.start_time = time.time()
def core_fn(*args, **kwargs):
  key_flags = f(*args, **kwargs)
  [flags.declare_key_flag(fl) for fl in key_flags]  # pylint: disable=expression-not-assigned
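This fragment is the inner function of a decorator: `f` defines flags and returns the names of the ones to mark as key. A minimal sketch of what the surrounding wrapper might look like; the names register_key_flags and define_base_flags are illustrative, not from the source:

from absl import flags


def register_key_flags(f):
  """Decorator: runs a flag-defining function, then keys its returned flags."""
  def core_fn(*args, **kwargs):
    key_flags = f(*args, **kwargs)
    for fl in key_flags:
      flags.declare_key_flag(fl)
  return core_fn


@register_key_flags
def define_base_flags():
  flags.DEFINE_integer('batch_size', 32, 'Batch size.')
  return ['batch_size']  # names to surface in the module's --helpshort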
# minigo verbose
flags.DEFINE_boolean('verbose', True, '')

# rank allocation flags
flags.DEFINE_integer(
    'num_gpus_train', 1,
    'Train uses gpus 0 through num_gpus_train-1 on respective MPI ranks.')
flags.DEFINE_integer('procs_per_gpu', 1, 'MPI processes per gpu.')
flags.DEFINE_integer('rank_gpu_index', 0, 'GPU that this rank uses.')
flags.DEFINE_integer('ranks_per_node', 2, 'MPI ranks per node.')
flags.DEFINE_integer('num_nodes', 1, 'Number of nodes.')
flags.DEFINE_integer('num_train_nodes', 1, 'Number of nodes for training.')
flags.DEFINE_integer('num_selfplay_nodes', 1, 'Number of nodes for selfplay.')

# From train.py
flags.declare_key_flag('freeze')
flags.declare_key_flag('use_trt')
flags.declare_key_flag('trt_max_batch_size')
flags.declare_key_flag('trt_precision')
flags.declare_key_flag('shuffle_buffer_size')
flags.declare_key_flag('shuffle_examples')
flags.declare_key_flag('window_size')

# From dual_net.py
flags.declare_key_flag('work_dir')
flags.declare_key_flag('train_batch_size')
flags.declare_key_flag('lr_rates')
flags.declare_key_flag('lr_boundaries')
flags.declare_key_flag('l2_strength')
flags.declare_key_flag('conv_width')
flags.declare_key_flag('fc_width')
  --export_path=/tmp/published_models_dir
"""
import os

from absl import app, flags

import dual_net
import utils

flags.DEFINE_string('export_path', None,
                    'Where to export the model after training.')
flags.DEFINE_bool('create_bootstrap', True,
                  'Whether to create a bootstrap model before exporting')

flags.declare_key_flag('work_dir')

FLAGS = flags.FLAGS


def main(unused_argv):
    """Bootstrap random weights."""
    utils.ensure_dir_exists(os.path.dirname(FLAGS.export_path))
    if FLAGS.create_bootstrap:
        dual_net.bootstrap()
    dual_net.export_model(FLAGS.export_path)


if __name__ == '__main__':
    flags.mark_flags_as_required(['work_dir', 'export_path'])
    app.run(main)
    return not flags_dict['steps_to_train'] and flags_dict[
        'filter_amount'] == 1.0


@flags.multi_flags_validator(
    ['use_bt', 'cbt_project', 'cbt_instance', 'cbt_table'],
    message='Cloud Bigtable configuration flags not correct')
def _bt_checker(flags_dict):
    if not flags_dict['use_bt']:
        return True
    return (flags_dict['cbt_project']
            and flags_dict['cbt_instance']
            and flags_dict['cbt_table'])


# From dual_net.py
flags.declare_key_flag('work_dir')
flags.declare_key_flag('train_batch_size')
flags.declare_key_flag('num_tpu_cores')
flags.declare_key_flag('use_tpu')
flags.declare_key_flag('dist_train')
flags.declare_key_flag('training_seed')
flags.declare_key_flag('use_bfloat16')

FLAGS = flags.FLAGS


class EchoStepCounterHook(tf.estimator.StepCounterHook):
    """A hook that logs steps per second."""

    def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
        s_per_sec = elapsed_steps / elapsed_time
        logging.info("{}: {:.3f} steps per second".format(
import dual_net
import preprocessing
import utils

flags.DEFINE_integer('examples_to_validate', 50 * 2048,
                     'Number of examples to run validation on.')
flags.DEFINE_string('validate_name', 'selfplay',
                    'Name of validation set (i.e. selfplay or human).')
flags.DEFINE_bool('expand_validation_dirs', True,
                  'Whether to expand the input paths by globbing. If false, '
                  'directly read and validate on the given files.')

# From dual_net.py
flags.declare_key_flag('work_dir')
flags.declare_key_flag('use_tpu')
flags.declare_key_flag('num_tpu_cores')

FLAGS = flags.FLAGS


def validate(*tf_records):
    """Validate a model's performance on a set of holdout data."""
    if FLAGS.use_tpu:
        def _input_fn(params):
            return preprocessing.get_tpu_input_tensors(
                params['batch_size'], tf_records, filter_amount=0.05,
                shuffle_examples=False)
    else:
        def _input_fn():
def declare_key_flags(flag_values=FLAGS):
  """Declares a few key flags."""
  for flag_name in DECLARED_KEY_FLAGS:
    flags.declare_key_flag(flag_name, flag_values=flag_values)
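A short usage sketch of the helper above. DECLARED_KEY_FLAGS is referenced but not shown in the snippet, so the flag definitions and list contents below are illustrative only:

from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_integer('batch_size', 32, 'Batch size.')
flags.DEFINE_string('model_dir', '/tmp/model', 'Model directory.')

# Illustrative stand-in for the module's real list.
DECLARED_KEY_FLAGS = ['batch_size', 'model_dir']


def declare_key_flags(flag_values=FLAGS):
  for flag_name in DECLARED_KEY_FLAGS:
    flags.declare_key_flag(flag_name, flag_values=flag_values)


declare_key_flags()  # both flags now appear in this module's --helpshort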
def core_fn(*args, **kwargs):
  key_flags = f(*args, **kwargs)
  [flags.declare_key_flag(fl) for fl in key_flags]
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from absl import flags
from absl.testing import parameterized
from six.moves import range
import tensorflow.compat.v1 as tf  # tf

from summae import pors
from summae import util

# pylint: disable=invalid-name

FLAGS = flags.FLAGS
flags.declare_key_flag('task')
flags.declare_key_flag('use_tpu')
flags.declare_key_flag('pretrain_as_autoencoder')
flags.declare_key_flag('in_domain_pretrain_steps')
flags.declare_key_flag('out_domain_pretrain_steps')
flags.declare_key_flag('decode_reconstructions')


class PorsTest(tf.test.TestCase, parameterized.TestCase):

  def setUp(self):
    super(PorsTest, self).setUp()
    FLAGS.task = 'rocstories'
    FLAGS.use_tpu = False
    FLAGS.pretrain_as_autoencoder = True
    FLAGS.decode_reconstructions = False
import os
import sys

sys.path.insert(0, '.')

from absl import app, flags
from tensorflow.compat.v1 import gfile
import tensorflow.compat.v1 as tf

from rl_loop import fsdb
import mask_flags
from rl_loop import shipname
import utils
import dual_net

flags.DEFINE_string('pro_dataset', None,
                    'Location of preprocessed pro dataset for validation')

# From fsdb.py - must pass one of the two.
flags.declare_key_flag('base_dir')
flags.declare_key_flag('bucket_name')

FLAGS = flags.FLAGS

try:
    if 'KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS' in os.environ:
        TPU_NAME = os.environ['KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS']
    else:
        TPU_NAME = os.environ['TPU_NAME']
except KeyError:
    raise Exception("Must have $TPU_NAME configured")


def train():
    model_num, model_name = fsdb.get_latest_model()
import utils
from sgf_wrapper import replay_sgf, replay_sgf_file, translate_sgf_move, make_sgf

flags.DEFINE_string('load_file', None, 'Path to model save files.')
flags.DEFINE_string('selfplay_dir', None, 'Where to write game data.')
flags.DEFINE_string('holdout_dir', None, 'Where to write held-out game data.')
flags.DEFINE_string('sgf_dir', None, 'Where to write human-readable SGFs.')
flags.DEFINE_float('holdout_pct', 0.05, 'What percent of games to hold out.')
flags.DEFINE_float('resign_disable_pct', 0.05,
                   'What percent of games to disable resign for.')

# From strategies.py
flags.declare_key_flag('verbose')
flags.declare_key_flag('num_readouts')

save_path = 'results/'
FLAGS = flags.FLAGS
N = 19
readouts = 50


def cross_entropy_mcts(dict1, dict2, a_b):
    '''Calculates the cross entropy of the probability distributions of
    actions in dict2 with respect to dict1 (without considering a_b).
    '''
    P1 = []  # values of moves in dictP^dictQ wrt P
    P2 = []  # values of moves in dictP^dictQ wrt Q
    for move in dict1:
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import time

from absl import app, flags

sys.path.insert(0, '.')

from rl_loop import fsdb
import mask_flags

# From rl_loop/fsdb.py
flags.declare_key_flag('bucket_name')

# "_nr" signifies "No Resign", aka calibration game, which will use a different
# set of flags and will not update its flags from a remote flagfile.
flags.DEFINE_enum('mode', None, ['cc', 'tpu', 'tpu_nr'],
                  'Which setup to use: cc on GPU or cc/py on TPU.')

FLAGS = flags.FLAGS


def run_cc():
    _, model_name = fsdb.get_latest_model()
    num_games_finished = len(fsdb.get_games(model_name))
    if num_games_finished > 25000:
        print("{} has enough games! ({})".format(
            model_name, num_games_finished))
    if flags['use_bt'] else True,
    '`use_bt` flag only valid with `use_tpu` as well')


@flags.multi_flags_validator(
    ['use_bt', 'cbt_project', 'cbt_instance', 'cbt_table'],
    message='Cloud Bigtable configuration flags not correct')
def _bt_checker(flags_dict):
    if not flags_dict['use_bt']:
        return True
    return (flags_dict['cbt_project']
            and flags_dict['cbt_instance']
            and flags_dict['cbt_table'])


# From dual_net.py
flags.declare_key_flag('work_dir')
flags.declare_key_flag('train_batch_size')
flags.declare_key_flag('num_tpu_cores')
flags.declare_key_flag('num_ipu_cores')
flags.declare_key_flag('iterations_per_loop')
flags.declare_key_flag('use_tpu')
flags.declare_key_flag('PROFILING')

FLAGS = flags.FLAGS


class EchoStepCounterHook(tf.train.StepCounterHook):
    """A hook that logs steps per second."""

    def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
        s_per_sec = elapsed_steps / elapsed_time
        logging.info("{}: {:.3f} steps per second".format(