def test_make_shuffled_deck_length():
    """The shuffled deck contains exactly ``num_cards`` cards."""
    num_cards = 20
    config = build_config(0, num_cards, 0, False)

    # test
    deck = Dealer(config)._make_shuffled_deck()

    # Compare against the configured size instead of repeating the literal 20,
    # so the assertion tracks num_cards if the fixture ever changes.
    assert len(deck) == num_cards
def test_make_shuffled_deck_shuffled():
    """The deck's card values sum to 1 + 2 + ... + num_cards.

    This checks the shuffled deck is still a permutation of the original
    values (no cards lost or duplicated, as far as the sum can tell).
    """
    num_cards = 20
    config = build_config(0, num_cards, 0, False)

    # test
    deck = Dealer(config)._make_shuffled_deck()

    expected_total = (num_cards * (num_cards + 1)) / 2  # Gauss sum of 1..n
    assert sum(deck) == expected_total
def main():
    # Driver for a systematics MC pass.  The large commented-out region below
    # is a set of hand-toggled alternatives (run lists and tracker variations)
    # that the author switches between by editing this function directly.
    one_mrad = math.degrees(0.001)  # 1 milliradian expressed in degrees
    iteration = get_iteration()
    # Python 2 print statement -- this module targets Python 2.
    print "Running systematics mc iteration", iteration
    # Alternative run selections; only the uncommented list is active.
    #for run in [10069]: #, 10064, 10051, 10052,]: #
    #for run in [9883, 9885, 9886,]:
    #for run in [10243, 10245, 10246,]:
    #for run in [10314, 10317, 10318, 10319,]:
    #for run in [10508, 10504, 10509,]:
    #for run in [9911, 10268,]:
    #for run in [9910, 10267,]:
    #for run in [9909, 10265,]:
    #for run in [9911, 9910, 9909,]:
    #for run in [10268, 10267, 10265,]:
    for run in [ 9885, ]:
    #for run in [10317,]:
    #for run in [9911, 9910, 9909,]:
        #my_config = config.build_config(run, "tku", "base", iteration)
        #do_one(my_config)
        #######continue ## continue if only running tku_base hybrid mc (use this for amplitude/density/frac_emittance corrections from beam)
        #for tracker in ["tku", "tkd"]:
        #for tracker in ["tku"]:
        #rotation = {"x":one_mrad*3, "y":0., "z":0.}
        #my_config = config.build_config(run, tracker, "rot_plus", iteration, rotation = rotation)
        #do_one(my_config)
        #position = {"x":3., "y":0., "z":0.}
        #my_config = config.build_config(run, tracker, "pos_plus", iteration, position = position)
        #do_one(my_config)
        #scale = {"C":1.03, "E2":1.05, "E1":1.05}
        #scale = {"C":1.03, }
        #base_scale = {"E2":1.0, "E1":1.0, "C":1.0}
        #for key in scale.keys():
        #    base_scale[key] = scale[key]
        #    name = "scale_"+key+"_plus"
        #    my_config = config.build_config(run, tracker, name, iteration, currents = base_scale)
        #    base_scale[key] = 1.0
        #    do_one(my_config)
        #my_config = config.build_config(run, tracker, "density_plus", iteration, density = 3.0)
        #do_one(my_config)
        # Active variation: scale the "C" value up by 3% for the "tkd"
        # tracker only -- presumably magnet/coil currents; TODO confirm.
        for tracker in ["tkd"]:
            scale = { "C": 1.03, }
            base_scale = {"E2": 1.0, "E1": 1.0, "C": 1.0}
            for key in scale.keys():
                base_scale[key] = scale[key]
                name = "scale_" + key + "_plus"
                my_config = config.build_config(run, tracker, name, iteration, currents=base_scale)
                base_scale[key] = 1.0  # restore nominal before the next key
                do_one(my_config)
        # NOTE(review): a triple-quoted block starts here and continues past
        # this excerpt -- it comments out further variations in bulk.
        """my_config = config.build_config(run, "tku", "base", iteration)
def main():
    """Export a trained object-detection checkpoint and upload the result."""
    build_config()

    parser = ArgumentParser()
    parser.add_argument('--research_dir')
    parser.add_argument('--training_dir')
    parser.add_argument('--model_name', default="object-detection")
    parser.add_argument('--model_version', default="1.0.0")
    parser.add_argument('--train_build_id')
    parser.add_argument('--train_checkpoint')
    args, _ = parser.parse_known_args()

    # Re-run this process's argv through the TF export script: swap in the
    # script path, prepend the interpreter, and append export-specific flags.
    cmd = sys.argv[:]
    cmd[0] = args.research_dir + '/object_detection/export_inference_graph.py'
    cmd.insert(0, sys.executable or 'python')
    cmd += [
        "--pipeline_config_path",
        "faster_rcnn.config",
        "--trained_checkpoint_prefix",
        "%s/%s/model.ckpt-%s" % (args.training_dir, args.train_build_id, args.train_checkpoint),
        "--output_directory",
        "%s/model/%s" % (args.training_dir, args.train_build_id),
        "--input_type",
        "encoded_image_string_tensor",
    ]
    print("Execute: ", cmd)
    call(cmd)

    # Upload the exported SavedModel and record a catalog link on the task.
    uploader = client.Client()
    uploader.model_upload(
        args.model_name,
        args.model_version,
        '%s/model/%s/saved_model' % (args.training_dir, args.train_build_id),
    )
    uploader.update_task_info({
        'model': '#/%s/catalog/mlmodel/%s/versions/%s' % (
            os.environ['WORKSPACE_NAME'],
            args.model_name,
            args.model_version,
        ),
    })
def main():
    """Command-line dispatcher for training, evaluation and crypto demos.

    Bare arguments (no '=') are treated as actions; 'key=value' arguments
    are left for build_config to consume.
    """
    # Idiomatic membership tests ('in') replace the original
    # actions.__contains__(...) calls; behavior is unchanged.
    actions = [arg for arg in argv[1:] if "=" not in arg]
    build_config(argv[1:])
    logging.basicConfig(level=logging.INFO)

    if "help" in actions:
        print("Training utility")
        return 0
    if "train" in actions:
        trainer.start(model=defaults["model"])
    if "eval" in actions:
        test.evaluate()
    if "encrypt" in actions:
        P = input("Text: ")
        K = test.keygen()
        C = test.encrypt(P, K)
        if "decrypt" in actions:
            # Round-trip: decrypt what we just encrypted (optionally with Eve).
            D, G = test.decrypt(C, K, witheve="eve" in actions)
            print("Decrypted:", test.decode(D))
            if "eve" in actions:
                print("Eve Guess:", test.decode(G))
                test.evaluate_manual(P, D)
        else:
            print("Encrypted:", C)
            print("Encrypted (decoded):", test.decode(C, digest="hex"))
            print("Key:", K)
    if "decrypt" in actions and "encrypt" not in actions:
        # Standalone decrypt: read cipher text and key from stdin.
        C = input("Cipher: ")
        K = input("Key: ")
        D = test.decrypt(str_to_bintensor(C, encoding="hex"), K)
        print("Decrypted:", test.decode(D))
    print()
def main():
    # Build and run one systematics MC configuration per run/tracker variation.
    one_mrad = math.degrees(0.001)  # 1 milliradian expressed in degrees
    iteration = get_iteration()
    # Python 2 print statement -- this module targets Python 2.
    print "Running systematics mc iteration", iteration
    for run in [10069]: #, 10064, 10051, 10052,]: #
        # Baseline configuration for the upstream tracker ("tku").
        my_config = config.build_config(run, "tku", "base", iteration)
        do_one(my_config)
        # NOTE(review): this bare 'continue' makes everything below in the loop
        # unreachable -- presumably a manual toggle to run only the baseline
        # (the sibling script keeps the same code commented out); confirm.
        continue
        for tracker in ["tku", "tkd"]:
            # Rotation systematic: 3 mrad about x.
            rotation = {"x": one_mrad * 3, "y": 0., "z": 0.}
            my_config = config.build_config(run, tracker, "rot_plus", iteration, rotation=rotation)
            do_one(my_config)
            # Position offset systematic: 3 units along x (units not shown here).
            position = {"x": 3., "y": 0., "z": 0.}
            my_config = config.build_config(run, tracker, "pos_plus", iteration, position=position)
            do_one(my_config)
            # Scale systematics: bump one key at a time, keeping the others
            # at nominal 1.0 -- presumably magnet/coil currents; TODO confirm.
            scale = {"C": 1.03, "E2": 1.05, "E1": 1.05}
            base_scale = {"E2": 1.0, "E1": 1.0, "C": 1.0}
            for key in scale.keys():
                base_scale[key] = scale[key]
                name = "scale_" + key + "_plus"
                my_config = config.build_config(run, tracker, name, iteration, currents=base_scale)
                base_scale[key] = 1.0  # restore nominal before the next key
                do_one(my_config)
            # Density systematic.
            my_config = config.build_config(run, tracker, "density_plus", iteration, density=3.0)
            do_one(my_config)
def main():
    """Launch the TF object-detection training entry point as a subprocess."""
    build_config()

    parser = ArgumentParser()
    parser.add_argument('--training_dir')
    parser.add_argument('--research_dir')
    parser.add_argument('--build_id')
    parser.add_argument('--num_steps', default=1000)
    parser.add_argument('--only_train', default='False')
    args, _ = parser.parse_known_args()

    # Forward our own argv to model_main.py: swap in the script path, prepend
    # the interpreter, and append the pipeline-config and model-dir flags.
    cmd = sys.argv[:]
    cmd[0] = args.research_dir + '/object_detection/model_main.py'
    cmd.insert(0, sys.executable or 'python')
    cmd += [
        "--pipeline_config_path",
        "faster_rcnn.config",
        "--model_dir",
        "%s/%s" % (args.training_dir, args.build_id),
    ]
    call(cmd)

    # Record which build was trained so later pipeline stages can find it.
    client.Client().update_task_info({'train_build_id': args.build_id})
def main():
    """Publish the pipeline config to the task page, then export the model."""
    build_config()

    # Read the generated pipeline config and attach it to the task as
    # preformatted HTML.
    with open('faster_rcnn.config', 'r') as cf:
        data = cf.read()
    config_html = (
        '<html><head></head><body>'
        '<pre style="word-wrap: break-word; white-space: pre-wrap;">{}</pre>'
        '</body></html>'
    ).format(data)
    client.Client().update_task_info({'#documents.config.html': config_html})

    parser = ArgumentParser()
    parser.add_argument('--training_dir')
    parser.add_argument('--research_dir')
    parser.add_argument('--model_name', default="object-detection")
    parser.add_argument('--model_version', default="1.0.0")
    parser.add_argument('--build_id')
    parser.add_argument('--num_steps')
    args, _ = parser.parse_known_args()

    export_subprocess(args.research_dir, args.training_dir, args.build_id,
                      args.num_steps, args.model_name, args.model_version)
def test_partition_basic():
    """Partitioning a 12-card deck into hands of 3 yields four 3-card hands."""
    num_players = 3
    num_cards = 12
    config = build_config(num_players, num_cards, 0, False)
    deck = [card for card in range(1, num_cards + 1)]

    # test
    hands = Dealer(config)._partition(deck, config.num_cards_per_hand)

    assert len(hands) == 4
    assert all(len(hand) == 3 for hand in hands)
def test_deal_basic():
    """Dealing to three players leaves each player and the kitty with 3 cards."""
    names = ["mozart", "beethoven", "chopin"]
    players = [Player(name, build_selector("next_card")) for name in names]
    num_cards = 12
    config = build_config(len(players), num_cards, 0, False)

    # test
    result = Dealer(config).deal(players)

    assert len(result.kitty.hand.cards) == 3
    for player in result.players:
        assert len(player.hand.cards) == 3
def test_get_bids_basic():
    """Every player bids on the prize card; the first bid is mozart's card 1."""
    players = [
        Player("mozart", s.build_selector("next_card"), h.Hand([1, 2, 3])),
        Player("beethoven", s.build_selector("next_card"), h.Hand([4, 5, 6])),
        Player("chopin", s.build_selector("next_card"), h.Hand([7, 8, 9])),
    ]
    num_cards = 12
    config = c.build_config(len(players), num_cards, 0, False)
    prize_card = 10

    # test
    bids = round.get_bids(prize_card, players, config.max_card)

    assert len(bids) == 3
    first_bid = bids[0]
    assert first_bid.bidder.name == "mozart"
    assert first_bid.offer == 1
    assert first_bid.prize_card == prize_card
# Change the current directory to the location of the application os.chdir(os.path.dirname(os.path.realpath(sys.argv[0]))) # CLI argument parser - handles picking up the config file from the command line, and sending a "help" message parser = argparse.ArgumentParser() parser.add_argument('-c', '--config', action='store', dest='CONFIG_FILE', help='/full/path/to/config.file (usually hblink.cfg)') parser.add_argument('-l', '--logging', action='store', dest='LOG_LEVEL', help='Override config file logging level.') cli_args = parser.parse_args() # Ensure we have a path for the config file, if one wasn't specified, then use the execution directory if not cli_args.CONFIG_FILE: cli_args.CONFIG_FILE = os.path.dirname(os.path.abspath(__file__))+'/hblink.cfg' # Call the external routine to build the configuration dictionary CONFIG = config.build_config(cli_args.CONFIG_FILE) # Call the external routing to start the system logger if cli_args.LOG_LEVEL: CONFIG['LOGGER']['LOG_LEVEL'] = cli_args.LOG_LEVEL logger = log.config_logging(CONFIG['LOGGER']) logger.info('\n\nCopyright (c) 2013, 2014, 2015, 2016, 2018, 2019, 2020\n\tThe Regents of the K0USY Group. All rights reserved.\n') logger.debug('(GLOBAL) Logging system started, anything from here on gets logged') # Set up the signal handler def sig_handler(_signal, _frame): logger.info('(GLOBAL) SHUTDOWN: HBLINK IS TERMINATING WITH SIGNAL %s', str(_signal)) hblink_handler(_signal, _frame) logger.info('(GLOBAL) SHUTDOWN: ALL SYSTEM HANDLERS EXECUTED - STOPPING REACTOR') reactor.stop()
# Stdlib imports first, then project-local modules.  `import sys` was missing
# even though sys.argv is read below -- added here so the script runs.
import itertools
import logging
import sys

import data_provider
from config import build_config
from models.vae_supervised import augment

logger = logging.getLogger('main')

# MNIST normalization parameters.
NORM_STD = 0.3081
NORM_MEAN = 0.1307
# Valid pixel range in normalized space, nudged inward by 1e-4 so clamped
# values never sit exactly on the boundary.
MIN = -NORM_MEAN / NORM_STD + 1e-4
MAX = (1 - NORM_MEAN) / NORM_STD - 1e-4

# Which VAE model to use.
config = build_config(sys.argv[1], logger)
# Which example from data provider to modify.
example_id = int(sys.argv[2])
# How many samples to get.
n_trials = int(sys.argv[3])

mode = config['vae_expander']['mode']
dp = data_provider.DataProvider('train_labeled.p')
vaes = augment(config, dp.loader)

for i in range(n_trials):
    # Take the stream up to and including example_id, then pick that example.
    examples = list(itertools.islice(vaes, example_id + 1))
    # Undo MNIST normalization and clamp into [0, 1]; the .clamp call suggests
    # a torch tensor -- TODO confirm against data_provider/augment.
    z = (examples[example_id][0] * NORM_STD + NORM_MEAN).clamp(0, 1)
from dl_utils import Brain2enDataset, MyCollator
from eval_utils import evaluate_roc, evaluate_topk
from models import PITOM, ConvNet10, MeNTAL, MeNTALmini
from train_eval import plot_training, train, valid
from vocab_builder import get_sp_vocab, get_std_vocab, get_vocab
# from train_eval import *

# NOTE(review): `datetime`, `torch`, `arg_parser` and `build_config` are used
# below but not imported in this span -- presumably imported earlier in the
# file; confirm.

# datetime object containing current date and time
now = datetime.now()
dt_string = now.strftime("%A %d/%m/%Y %H:%M:%S")
print("Start Time: ", dt_string)
# Timestamp used to tag this run's results/config.
results_str = now.strftime("%Y-%m-%d-%H:%M")

args = arg_parser()
CONFIG = build_config(args, results_str)
# sys.stdout = open(CONFIG["LOG_FILE"], 'w')

# Model objectives: maps each model name to the kind of task it is trained on.
MODEL_OBJ = {
    "ConvNet10": "classifier",
    "PITOM": "classifier",
    "MeNTALmini": "classifier",
    "MeNTAL": "seq2seq"
}

# GPUs: use CUDA device 0 when available; cap the requested GPU count at the
# number of devices actually present.
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
args.gpus = min(args.gpus, torch.cuda.device_count())
def main():
    """Train or continuously evaluate a TF object-detection model.

    The role is chosen by mutually exclusive CLI flags: --worker trains,
    --evaluator runs continuous evaluation (optionally exporting the model).
    """
    targs = build_config()
    parser = ArgumentParser()
    # Exactly one role must be chosen: --worker or --evaluator.
    group = parser.add_mutually_exclusive_group(required=True)
    group.set_defaults(worker=False)
    group.set_defaults(evaluator=False)
    group.add_argument('--worker', dest='worker', action='store_true', help='Training')
    group.add_argument('--evaluator', dest='evaluator', action='store_true', help='Continuously evaluate model')
    parser.add_argument('--training_dir')
    parser.add_argument('--research_dir')
    parser.add_argument('--build_id')
    parser.add_argument('--only_train', default='False')
    parser.add_argument('--export', type=str_bool, help='Export model')
    parser.add_argument('--model_name')
    parser.add_argument('--model_version')
    args, _ = parser.parse_known_args()
    # Attach the pipeline config to the task page as preformatted HTML.
    with open('faster_rcnn.config', 'r') as cf:
        data = cf.read()
    config_html = '<html><head></head><body><pre style="word-wrap: break-word; white-space: pre-wrap;">{}</pre></body></html>'.format(
        data)
    client.Client().update_task_info({'#documents.config.html': config_html})
    # Make the TF models/research checkout importable (model_lib, model_hparams).
    sys.path.append(args.research_dir)
    num_steps = targs['num_steps']
    model_dir = '{}/{}'.format(args.training_dir, args.build_id)
    config = tf.estimator.RunConfig(model_dir=model_dir)
    train_and_eval_dict = model_lib.create_estimator_and_inputs(
        run_config=config,
        hparams=model_hparams.create_hparams(None),
        pipeline_config_path='faster_rcnn.config',
        train_steps=num_steps,
        sample_1_of_n_eval_examples=1,
        sample_1_of_n_eval_on_train_examples=(5))
    estimator = train_and_eval_dict['estimator']
    train_input_fn = train_and_eval_dict['train_input_fn']
    train_steps = train_and_eval_dict['train_steps']
    eval_input_fns = train_and_eval_dict['eval_input_fns']
    if args.evaluator:
        tf.logging.info('Starting Evaluation.')
        # Only forward model identifiers when the evaluator should also export.
        model_name = None
        model_version = None
        if args.export:
            model_name = args.model_name
            model_version = args.model_version
        continuous_eval(estimator, model_dir, eval_input_fns[0],
                        'validation_data', args, model_name, model_version)
    elif os.environ.get("TF_CONFIG", '') != '':
        # TF_CONFIG present: run distributed train-and-evaluate.
        tf.logging.info('Starting Distributed.')
        eval_on_train_input_fn = train_and_eval_dict['eval_on_train_input_fn']
        predict_input_fn = train_and_eval_dict['predict_input_fn']
        train_spec, eval_specs = model_lib.create_train_and_eval_specs(
            train_input_fn,
            eval_input_fns,
            eval_on_train_input_fn,
            predict_input_fn,
            train_steps,
            eval_on_train_data=False)
        tf.estimator.train_and_evaluate(estimator, train_spec, eval_specs[0])
    else:
        # Single-process local training.
        tf.logging.info('Starting Training.')
        estimator.train(input_fn=train_input_fn, max_steps=train_steps)