``` python baseline.py directory --vgpu=1 ``` """ import os import sys import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf import l2o from config import ArgParser, get_eval_problem from gpu_setup import create_distribute args = ArgParser(sys.argv[1:]) vgpus = args.pop_get("--vgpu", default=1, dtype=int) cpu = args.pop_get("--cpu", default=False, dtype=bool) gpus = args.pop_get("--gpus", default=None) use_keras = args.pop_get("--keras", default=True, dtype=bool) distribute = create_distribute(vgpus=vgpus, do_cpu=cpu, gpus=gpus) problem = args.pop_get("--problem", "conv_train") target = args.pop_get("--optimizer", "adam") target_cfg = { "adam": { "class_name": "Adam", "config": { "learning_rate": 0.005, "beta_1": 0.9,
""" BLOCK_DEBUG = """ python3 evaluate.py \\ --problem={problem} \\ --directory=results/{policy}/{base}/{flags} \\ --repeat=1 \\ --debug=True """ BASE_RUNNER = ( "sbatch {queue}-N 1 -n 1 -o logs/{policy}-{base}-{flags}.log -t {time} " "{allocation}-J {shortname}{base}{flags} " "scripts/{policy}-{base}-{flags}.sh") args = ArgParser(sys.argv[1:]) flags = args.pop_get("--flags", "test").split(",") ctx = { "presets": args.pop_get("--presets", "conv_train"), "policy": args.pop_get("--policy", "rnnprop"), "strategy": args.pop_get("--strategy", "repeat"), "problem": args.pop_get("--problem", "conv_train,conv_deeper_pool,conv_cifar10_pool"), "base": args.pop_get("--base", "test"), "time": args.pop_get("--time", "24:00:00"),
my_logger.error(msg) raise Exception(msg) def handle_data(self, data): """ ABC: Handle data """ self.parser.set_html_data(data) pass if __name__ == '__main__': """ Main application processing for BookMarks """ my_logger.debug('INIT') # initialization and setup arg_parser = ArgParser() config = CfgParser() html_parser = MyHTMLParser() # open bookmarks file and feed to the parser bookmarks = None try: my_logger.info(f'Processing input file: {TheConfig.input_file}') with open(TheConfig.input_file, mode='r', encoding='utf-8') as html: bookmarks_html = html.read() html_parser.feed(bookmarks_html) bookmarks = html_parser.parser.bookmarks except Exception as e: my_logger.exception(f'Exception parsing file: {e}', exc_info=e) # analyze bookmarks just parsed
Strategy type to inform metadata flags. Can ignore if the default checkpoint is used. --periods : int Periods to evaluate. --stages : int Stages to evaluate. Only use if strategy=curriculum. (all other args) : float Passed as overrides to strategy/policy building. """ if len(sys.argv) < 2: print(HELP) exit(0) # Distribute args args = ArgParser(sys.argv[1:]) vgpus = args.pop_get("--vgpu", default=1, dtype=int) gpus = args.pop_get("--gpus", default=None) do_cpu = args.pop_get("--cpu", default=False, dtype=bool) distribute = create_distribute(vgpus=vgpus, do_cpu=do_cpu, gpus=gpus) # Core args problems = args.pop_get("--problem", "conv_train").split(",") targets = args.pop_get("--directory", "weights").split(",") repeat = args.pop_get("--repeat", default=10, dtype=int) debug = args.pop_get("--debug", default=False, dtype=bool) show_info = args.pop_get("--info", default=False, dtype=bool) recursive = args.pop_check("--recursive") # Unpack recursive if recursive:
"""Resume training from an existing strategy checkpoint.

Usage:
    python resume.py directory --vgpu=1
"""

import os
import sys

# Silence TensorFlow's C++ logging; must be set before tensorflow is imported.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf

import l2o
from config import ArgParser
from gpu_setup import create_distribute

# argv[1] is the strategy directory; everything after it is parsed as flags.
config_dir = sys.argv[1]
cli = ArgParser(sys.argv[2:])
n_virtual_gpus = cli.pop_get("--vgpu", default=1, dtype=int)
device_strategy = create_distribute(vgpus=n_virtual_gpus)

# Rebuild the saved training strategy inside the distribution scope and resume.
with device_strategy.scope():
    training_strategy = l2o.strategy.build_from_config(config_dir)
    training_strategy.train()
python baseline_custom.py directory --vgpu=1 ``` """ import os import sys import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf import l2o from config import ArgParser, get_eval_problem from gpu_setup import create_distribute args = ArgParser(sys.argv[1:]) vgpus = args.pop_get("--vgpu", default=1, dtype=int) distribute = create_distribute(vgpus=vgpus) problem = args.pop_get("--problem", "conv_train") output = args.pop_get("--out", "eval") repeat = args.pop_get("--repeat", 10, dtype=int) policy = l2o.policies.AdamOptimizer( learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07) opt = policy.architecture(policy) with distribute.scope(): results = [] for i in range(repeat):
"""Hyperparameter grid search.""" import os import sys import numpy as np os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf import l2o from config import ArgParser, get_eval_problem from gpu_setup import create_distribute args = ArgParser(sys.argv[1:]) vgpus = args.pop_get("--vgpu", default=1, dtype=int) distribute = create_distribute(vgpus=vgpus) # policy_name = args.pop_get("--optimizer", "adam") problem_name = args.pop_get("--problem", "conv_train") learning_rates = [0.0005, 0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5] policy_names = ["adam", "rmsprop", "sgd", "momentum", "addsign", "powersign"] for policy_name in policy_names: dst = "gridsearch/{}/{}".format(problem_name, policy_name) os.makedirs(dst, exist_ok=True) for lr in learning_rates: if policy_name == "adam": policy = l2o.policies.AdamOptimizer(learning_rate=lr,
--directory=weights --path/to/param1=param1_value --path/to/param2=param2_value ... ``` Optional flags: --initialize: initialize only, but don't actually run training --presets=preset1,preset2,...: presets to load. """ import os import sys from config import get_default, get_preset, ArgParser args = ArgParser(sys.argv[1:]) # Finally ready to import tensorflow os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' import tensorflow as tf import l2o from gpu_setup import create_distribute # Directory directory = args.pop_get("--directory", default="weights") # Distribute vgpus = int(args.pop_get("--vgpu", default=1)) memory_limit = int(args.pop_get("--vram", default=12000)) gpus = args.pop_get("--gpus", default=None) distribute = create_distribute(