def get_optparser():
    """Build the optparse parser for the dataset-conversion CLI.

    Returns an optparse.OptionParser whose usage text lists the supported
    datasets and whose --model/-M option lists the selectable models.
    """
    dataset_options = ''
    for k in sorted(datasets.keys()):
        dataset_options += (" %s - %s \n" % (k, datasets[k].__name__))
    usage = ("usage: %prog <DATASET> <DATASET_PATH>\n\n"
             "DATASET is an integer corresponding to the following supported "
             "datasets:\n" + dataset_options)
    parser = optparse.OptionParser(usage=usage)
    model_default = models['default']
    # List the selectable models without the 'default' alias.  The original
    # popped 'default' out of the shared `models` registry here, mutating
    # module state as a side effect; build a filtered list instead so the
    # registry stays intact for other callers.
    model_names = [k for k in models.keys() if k != 'default']
    model_opts = ' OPTIONS=%s' % (model_names,)
    parser.add_option("--model", "-M", default=model_default, type="str",
                      metavar="STR",
                      help="[DEFAULT='%default']" + model_opts)
    return parser
def find_model_by_name(model_name):
    """Look up a model constructor in the `models` registry by name.

    Prints a short confirmation unless quiet mode (`args.g`) is set.

    Raises:
        ValueError: if no model with that name is registered.
    """
    # Direct dict lookup instead of the original linear scan over keys().
    try:
        model = models[model_name]
    except KeyError:
        # Also fixes the typo in the original message ("caled").
        raise ValueError(f'model called "{model_name}" not found') from None
    if not args.g:
        print('found model:', model_name)
    return model
def parse_args():
    """Parse command-line arguments and resolve the selected model.

    Returns a dict with keys: model_fn, epochs, checkpoint_dir and
    frozen_filename.  Exits the process for --list-models, for a missing
    model name, or for an unknown model name.
    """
    cmd_args, print_help = _setup_args()
    from models import models

    # --list-models: print the registry and stop.
    if cmd_args.list_models:
        model_names = '\n'.join(models)
        print(f'Models\n------------\n{model_names}')
        exit(0)

    # No model selected: show usage and stop.
    if cmd_args.model_name is None:
        print_help()
        exit(0)

    # Unknown model: report, show usage, and fail.
    if cmd_args.model_name not in models:
        print(f'unknown model name: {cmd_args.model_name}')
        print_help()
        exit(1)

    return {
        'model_fn': models[cmd_args.model_name],
        'epochs': cmd_args.epochs,
        'checkpoint_dir': cmd_args.checkpoint_dir,
        'frozen_filename': cmd_args.freeze,
    }
def main():
    """Entry point: parse CLI arguments, configure logging, run the test."""
    parser = utils.setup_parser(codes.get_code_names(), models.keys(),
                                utils.decoder_names)
    args = parser.parse_args()
    if args.debug:
        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    # Log either to the console or to a file under the data directory.
    if args.console:
        utils.setup_console_logger(log_level)
    else:
        utils.setup_file_logger(args.data_dir, 'test', log_level)
    test(args)
def setup_parser():
    """Build the CLI parser for the plotting tool.

    Positional arguments select channel, code, decoder(s) and plot type;
    optional flags filter matches and control plot appearance/output.
    """
    # https://stackoverflow.com/questions/17073688/how-to-use-argparse-subparsers-correctly
    parser = argparse.ArgumentParser()
    parser.add_argument('channel', help='channel', choices=models.keys())
    parser.add_argument('code', help='code name OR prefix')
    parser.add_argument('decoder', help='decoders list', nargs='+',
                        choices=utils.decoder_names)
    plot_types = [
        'single',     # plot of a code, channel, decoder
        'comp_dec',   # compare multiple decoders
        'ensemble',   # average and ensemble
        'max_iter',   # compare iterations cap
        'compare',    # compare with another
        'hist_iter',  # histogram and stats of iteration count
        'avg_iter',   # plot on average number of iterations
    ]
    parser.add_argument('type', help='plot type', choices=plot_types)
    # Simple typed filters/parameters, declared data-driven.
    for flag, text, kind in (
            ('--max-iter', 'filter out multiple matches', int),
            ('--apprx', 'filter out multiple matches', int),
            ('--param', 'param', float),
            ('--mu', 'mu', float),
            ('--eps', 'epsilon', float)):
        parser.add_argument(flag, help=text, type=kind)
    parser.add_argument(
        '--extra', help='code names to compare with in [type=compare] plots')
    # Axis range pairs.
    for flag, text in (('--xlim', 'x-axis range'),
                       ('--ylim', 'y-axis range')):
        parser.add_argument(flag, help=text, nargs=2, type=float)
    parser.add_argument('--error', help='which error rate', default='ber',
                        choices=['wer', 'ber'])
    parser.add_argument('--save', help='save as file name', type=str)
    parser.add_argument('--silent', help='do not show plot output',
                        action='store_true')
    parser.add_argument('--agg', help='set matplotlib backend to Agg',
                        action='store_true')
    return utils.bind_parser_common(parser)
from data import EnumPairedDataset
from data import train_val_dataset
from torchsummary import summary
import torchvision
from matplotlib import pyplot as plt
import numpy as np
from tqdm import tqdm
import os
import math
from logs import add_line_to_csv
import torch
import piq
from models import models
from loss import loss as loss_fun

# Comma-separated names from the model registry (e.g. for help/choices text).
model_names = ",".join(list(models.keys()))


def main(args):
    """Prepare output directories for a training/evaluation run.

    Creates (if missing): args.checkpoints_output, args.logs, and an
    'images' subdirectory under args.logs.
    NOTE(review): this function may be truncated in this view — confirm
    against the full file.
    """
    # NOTE(review): presumably the network's (C, H, W) input size — confirm.
    input_shape = (3, 380, 380)
    if not os.path.exists(args.checkpoints_output):
        os.makedirs(args.checkpoints_output)
    if not os.path.exists(args.logs):
        os.makedirs(args.logs)
    images_output = os.path.join(args.logs, 'images')
    if not os.path.exists(images_output):
        os.makedirs(images_output)
def model_check(self):
    """Drive one full model-checking run (Python 2 code).

    Sets up import paths, validates the configured model and strategy
    names, runs the checker, then logs profiling output and result
    statistics (state counts, path length, invariant violations).
    """
    stats.reset()
    # Put the checker's own packages at the front of the import path.
    sys.path = [
        MODEL_CHECKER_DIR,
        os.path.join(MODEL_CHECKER_DIR, "nox_lib")
    ] + sys.path
    sys.stdin = open("/dev/stdin", "r")  # multiprocess closes it
    # Import the debug or regular checker depending on configuration.
    if config.get("runtime.debug_mc"):
        from lib.model_checker import DebugModelChecker
    else:
        from lib.model_checker import ModelChecker
    from lib.strategies import strategies, RandomWalk
    from models import models
    # Fail fast on unknown model/strategy names (dict.has_key is Python 2).
    if not models.has_key(config.get("model.name")):
        utils.crash("Invalid model name: " + config.get("model.name") +
                    ".\nValid model names are: " + ', '.join(models.keys()))
    if not strategies.has_key(config.get("strategy.name")):
        utils.crash("Invalid strategy name: " + config.get("strategy.name") +
                    ".\nValid strategy names are: " +
                    ', '.join(strategies.keys()))
    config.set("model.class", models[config.get("model.name")])
    config.set("strategy.class", strategies[config.get("strategy.name")])
    stats.pushProfile("model checker init")
    symbolic_options = {"queues": self.queues}
    if config.get("runtime.debug_mc"):
        mc = DebugModelChecker(symbolic_options)
    else:
        mc = ModelChecker(symbolic_options)
    stats.popProfile()
    try:
        stats.pushProfile("model checker run")
        mc.start()
    except KeyboardInterrupt:
        log_stats.error("INTERRUPTED!")
    except:
        # NOTE(review): bare except — any failure is printed and execution
        # continues into the reporting section below.
        print sys.exc_info()[1]
    finally:
        stats.popProfile()
    log_stats.info("--- Profiling info for model checker ---")
    log_stats.info("\n" + stats.getProfilingOutput())
    log_stats.info("--- Results ---")
    log_stats.info("Total states: %d" %
                   (len(mc.unique_states) + mc.old_states_count))
    log_stats.info("Unique states: %d" % len(mc.unique_states))
    log_stats.info("Revisited states: %d" % mc.old_states_count)
    log_stats.info("Maximum path length: %d" % mc.max_path_length)
    log_stats.info("Invariant violations: %d" %
                   sum([x[0] for x in mc.violation_counter.values()]))
    # Per-invariant breakdown.  The violation tuple layout appears to be
    # (count, first_time_seconds, first_transitions, path) — TODO confirm.
    for k in mc.violation_counter:
        if mc.violation_counter[k][0] == 0:
            log_stats.debug("%-30s: 0 violations" % k)
        else:
            violation = mc.violation_counter[k]
            log_stats.error(
                "%-30s: %d violations (first found after %.2fs, "
                "%d transitions)" %
                (k, violation[0], violation[1], violation[2]))
            if violation[3]:
                log_stats.error("PATH: %s" % violation[3])
    if config.get("runtime.graph") is not None:
        mc.graph.saveToFile()
    if config.get("strategy.class") == RandomWalk:
        log_stats.warning("Random walk seed: %d", mc.strategy.seed)
def index():
    """Render the speech-synthesis page, passing the known model names."""
    available_models = models.keys()
    return render_template("speechSynthesis.html",
                           existing_models=available_models)
def setup_parser():
    """Build the CLI parser: channel, code prefix, and a single decoder."""
    p = argparse.ArgumentParser()
    p.add_argument('channel', help='channel', choices=models.keys())
    p.add_argument('prefix', help='prefix of code')
    p.add_argument('decoder', help='decoder', choices=utils.decoder_names)
    # Attach the options shared by all tools in this package.
    return utils.bind_parser_common(p)
import pandas as p
from scipy.stats import norm, uniform
from pyvar import MinnesotaPrior, SimsZhaSVARPrior, BayesianVAR
from fortress import make_smc
import sympy
from models import models as models, InvGamma, data as data

# NOTE(review): `argparse` is used below but not imported in this view —
# confirm it is imported elsewhere in the file.
parser = argparse.ArgumentParser(description='Estimation a model with SMC.')
parser.add_argument('--model', help='the model to estimate',
                    action='store', choices=models.keys(), default='5eq')
parser.add_argument('--nsim', help='# of particles', action='store',
                    type=int, default=9600)
parser.add_argument('--nproc', help='# of processors', action='store',
                    type=int, default=4)
# Parsed at import time: this chunk is a script entry, not a library module.
args = parser.parse_args()

# Resolve the selected model object from the registry.
model = models[args.model]
import numpy as np import pandas as p import matplotlib.pyplot as plt from tqdm import tqdm import json from collections import defaultdict from models import yy, models parser = argparse.ArgumentParser(description='plot impulse resposes.') parser.add_argument('--model', help='the model(s) to plot', action='store', choices=models.keys(), default='5eq', nargs='*') parser.add_argument('--overlay', help='means to overlay', action='store', choices=[None] + list(models.keys()), default=None, nargs='*') parser.add_argument('--nsim', help='the number of posterior draws to use', action='store', type=int, default=9600) parser.add_argument('--horizon', help='horizon of IRF',