def get_parser_continuous(parser: configargparse.ArgumentParser, config_path=None):
    """Register the options for continuous layout generation on *parser*.

    Shares the common options via get_parser_common (seeded from
    ``default_c.yml``) and returns the same parser for chaining.
    """
    parser = get_parser_common(parser, "default_c.yml")
    # Repeatable per-unit options; YAML-typed values accept lists/scalars.
    per_unit_options = (
        ("--units", yaml.safe_load, "shape of each unit"),
        ("--powers", yaml.safe_load, "power of each unit"),
        ("--angles", float, "angle of each unit"),
    )
    for flag, value_type, help_text in per_unit_options:
        parser.add(flag, action="append", type=value_type, help=help_text)
    parser.add("--sampler", type=str, choices=["sequence", "gibbs"], help="sampler method")
    return parser
def train_idr0017():
    """Configure and build a ConvolutionalBetaVAE on the idr0017 zarr dataset.

    Parses CLI/config options, seeds all RNGs for reproducibility, dumps the
    resolved hyperparameters to ``vae_config.yml`` in the log dir, and
    constructs the model with train/validation datasets.
    """
    parser = ArgumentParser()
    parser.add('-c', '--config', is_config_file=True, help='config file path')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--experiment_name", type=str, default="betaVAE_MNIST")
    parser.add_argument("--model_dir", type=str, default="/home/gvisona/Desktop/runs")
    parser.add_argument("--log_dir", type=str, default="/home/gvisona/Desktop/runs")
    parser.add_argument("--gpus", type=int, default=0)
    parser.add_argument("--max_epochs", type=int, default=2)
    parser = ConvolutionalBetaVAE.add_model_specific_args(parser)
    hparams = parser.parse_args()
    # set seeds so torch and numpy runs are reproducible
    torch.manual_seed(hparams.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(hparams.seed)
    tb_logger = loggers.TensorBoardLogger(hparams.log_dir)
    # Persist the resolved configuration alongside the logs.
    with open(os.path.join(hparams.log_dir, 'vae_config.yml'), 'w') as outfile:
        yaml.dump(hparams.__dict__, outfile, default_flow_style=False)
    transform = lambda im: RandomCropTransform(im, 128)
    train_ds = IDR0017_FullImgs_ZarrDataset(
        "/media/gvisona/GioData/idr0017/data/idr0017.zarr",
        training=True,
        transform=transform)
    # Bug fix: the validation dataset was constructed with training=True,
    # making it identical to the training split. The sibling train_mnist()
    # uses the held-out split (train=False) for validation; do the same here.
    val_ds = IDR0017_FullImgs_ZarrDataset(
        "/media/gvisona/GioData/idr0017/data/idr0017.zarr",
        training=False,
        transform=transform)
    model = ConvolutionalBetaVAE(hparams, train_ds, val_ds)
    print("HELLO")
def main_arguments_parser():
    """Build and return the Pigeoo command-line parser."""

    def str2bool(v):
        # Accept the usual truthy spellings; anything else is False.
        return str(v).lower() in {'yes', 'true', 't', 'y', '1'}

    parser = ArgumentParser(
        description='Pigeoo',
        default_config_files=['./pigeoo.rc', '~/.pigeoo.rc'],
    )  # TODO: use standard config file?
    parser.add('-c', '--config', is_config_file=True, help='config file path')
    parser.add_argument('--generate', '-g', type=str2bool, nargs='?', default=True,
                        help='Output folder for documentation.')
    parser.add_argument('--output_path', '-o', type=str, nargs='?',
                        help='Output folder for documentation.')
    parser.add_argument('--paths', '-p', type=str, nargs='?', default=PATHS,
                        help='Comma separated list of paths.')
    parser.add_argument('--local', '-l', type=str2bool, nargs='?', default=True,
                        help='If run in local mode, documentation contains links to files.')
    parser.add_argument('--modules', '-m', type=str, nargs='?', default=[],
                        help='If set, restrict the modules to their dependencies.')
    return parser
def get_parser_discrete(parser: configargparse.ArgumentParser, config_path=None):
    """Register the options for discrete layout generation on *parser*.

    Builds on get_parser_common (seeded from ``default.yml``) and returns
    the same parser for chaining.
    """
    parser = get_parser_common(parser, "default.yml")
    parser.add("--power", action="append", type=float, help="possible power of each unit")
    parser.add("--unit_n", type=int, help="number of units")
    parser.add("--sampler", type=str, choices=["uniform"], help="sampler method")
    return parser
def get_parser_continuous_power(parser: configargparse.ArgumentParser, config_path=None):
    """Register options for continuous layouts with sampled component powers.

    Builds on get_parser_common (seeded from ``default_c_power.yml``) and
    returns the same parser for chaining.
    """
    parser = get_parser_common(parser, "default_c_power.yml")
    # Repeatable per-unit options, registered in the original order.
    for flag, value_type, help_text in (
        ("--units", yaml.safe_load, "shape of each unit"),
        ("--angles", float, "angle of each unit"),
        ("--powers", yaml.safe_load, "power of each unit"),
        ("--positions", yaml.safe_load, "positions of each unit"),
    ):
        parser.add(flag, action="append", type=value_type, help=help_text)
    parser.add("--positions_type", type=str, choices=['coord', 'grid'],
               help="the type of input positions: coord or grid")
    parser.add("--observation_points", action="append", type=yaml.safe_load,
               default=None, help="positions of chosen observation points")
    parser.add("--observation_points_type", type=str, choices=['coord', 'grid'],
               help="the type of observation points: coord or grid")
    return parser
def parse_arguments():
    """Parse CLI/environment options for the versioned-dataset export tool."""
    descriptionTXT = "Lee un dataset versionado (fichero CSV en GIT) y genera un CSV con los últimos valores de cada datos"
    parser = ArgumentParser(description=descriptionTXT)
    # Verbosity / debugging switches (also settable via GTS_* env vars).
    parser.add('-v', dest='verbose', action="count", env_var='GTS_VERBOSE', default=0, required=False, help='')
    parser.add('-d', dest='debug', action="store_true", env_var='GTS_DEBUG', default=False, required=False, help='')
    # Input is optional; output and repository location are mandatory.
    parser.add('-i', dest='infile', type=str, env_var='GTS_INFILE', required=False, help='Fichero de entrada')
    parser.add('-o', dest='outfile', type=str, env_var='GTS_OUTFILE', required=True, help='Fichero de salida')
    parser.add('-r', dest='repoPath', type=str, env_var='GTS_REPOPATH', required=True, help='Directorio base del repositorio GIT')
    parser.add('-f', dest='csvPath', type=str, env_var='GTS_CSVPATH', required=True, help='Ubicación del fichero de dataset dentro del repositorio GIT')
    parser.add('-t', dest='colIndice', type=str, env_var='GTS_INDEXCOL', required=True,
               choices=['zona_basica_salud', 'municipio_distrito'],
               help='Columnas se van a usar como indice del dataframe')
    parser.add('-c', dest='create', action="store_true", env_var='GTS_CREATE', default=False, required=False, help='Inicializa el fichero si no existe ya')
    return parser.parse_args()
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ NO FUNCIONA: Usar GetSuperManagerMerged """ from configargparse import ArgumentParser from SMACB.SuperManager import SuperManagerACB if __name__ == '__main__': parser = ArgumentParser() parser.add('-u', dest='user', type=str, env_var='SM_USER', required=True) parser.add('-p', dest='password', type=str, env_var='SM_PASSWORD', required=True) parser.add('-l', dest='league', type=str, env_var='SM_LEAGUE', required=False) parser.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE', required=False, default=0) parser.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG', required=False, default=False) parser.add('-i', dest='infile', type=str, env_var='SM_INFILE', required=False) parser.add('-o', dest='outfile', type=str, env_var='SM_OUTFILE', required=False) # parser.add_argument('-i', '--input', type=str, required=False, dest='infile') args = parser.parse_args() sm = SuperManagerACB(config=args) if 'infile' in args and args.infile: sm.loadData(args.infile) # sm = SuperManagerACB(config=args) sm.Connect()
def train_mnist():
    """Train a ConvolutionalBetaVAE on MNIST.

    Parses CLI/config options (YAML config file required via -c), seeds all
    RNGs, logs to TensorBoard and Neptune, dumps the resolved hyperparameters
    to ``config.yml``, and runs a Lightning Trainer with early stopping.
    """
    parser = ArgumentParser(
        config_file_parser_class=YAMLConfigFileParser,
        default_config_files=[
            'mnist_config.yml',
            "/home/gvisona/Projects/interpreting-representations/dsrepr/experiments/config/mnist_config.yml"
        ])
    parser.add('-c', '--config', required=True, is_config_file=True, help='config file path')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--experiment_name", type=str, default="betaVAE_MNIST")
    parser.add_argument("--neptune_project", type=str, default="gvisona/idr0017")
    parser.add_argument("--model_dir", type=str, default=DATAHOME)
    parser.add_argument("--log_dir", type=str, default=DATAHOME)
    parser.add_argument(
        "--log_level",
        default="INFO",
        type=_log_level_string_to_int,
        nargs='?',
        help="Set the logging output level. {0}".format(_LOG_LEVEL_STRINGS))
    parser.add_argument("--gpus", type=int, default=0)
    parser.add_argument("--max_epochs", type=int, default=10)
    # Let the model contribute its own hyperparameter flags.
    parser = ConvolutionalBetaVAE.add_model_specific_args(parser)
    hparams = parser.parse_args()
    # set seeds for reproducible torch/numpy behavior
    torch.manual_seed(hparams.seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    np.random.seed(hparams.seed)
    tb_logger = loggers.TensorBoardLogger(hparams.log_dir)
    neptune_logger = loggers.NeptuneLogger(
        project_name=hparams.neptune_project,
        params=vars(hparams),
        experiment_name=hparams.experiment_name)
    logging.basicConfig(level=hparams.log_level)
    # Persist the resolved configuration alongside the logs.
    with open(os.path.join(hparams.log_dir, 'config.yml'), 'w') as outfile:
        yaml.dump(hparams.__dict__, outfile, default_flow_style=False)
    train_ds = MNIST(TRIALDATA, train=True, download=True,
                     transform=torchvision.transforms.ToTensor())
    # Validation uses the MNIST held-out split (train=False).
    val_ds = MNIST(TRIALDATA, train=False, download=True,
                   transform=torchvision.transforms.ToTensor())
    model = ConvolutionalBetaVAE(hparams, train_ds, val_ds)
    early_stopping_cb = EarlyStopping('val_loss', patience=5)
    trainer = Trainer(gpus=hparams.gpus,
                      max_epochs=hparams.max_epochs,
                      default_save_path=hparams.model_dir,
                      logger=[tb_logger, neptune_logger],
                      callbacks=[early_stopping_cb])
    trainer.fit(model)
resultados[clave][k]['median'] = median(auxVals) if lv else "-" resultados[clave][k]['mean'] = mean(auxVals) if lv else "-" resultados[clave][k]['stdev'] = stdev(auxVals) if lv > 1 else "-" resultados['jug'] = Rjug resultados['vict'] = Rvict return resultados if __name__ == '__main__': parser = ArgumentParser() parser.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE', required=False, default=0) parser.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG', required=False, default=False) parser.add('-i', dest='infile', type=str, env_var='SM_INFILE', required=True) parser.add('-t',
def main(debug=False, options_flag=False):
    """Entry point for the layout-generator CLI.

    Wires up the subcommands (generate, generate_c, generate_c_power, plot,
    convert, makeconfig), parses the command line, and dispatches to the
    subcommand's ``handle`` function.

    Args:
        debug: if True, return ``(parser, options)`` without dispatching.
        options_flag: if True, return the parsed options after dispatching.
    """
    parser = ArgumentParser()
    parser.add(
        "-V",
        "--version",
        action="version",
        version=f"layout-generator version: {__version__}",
    )
    subparsers = parser.add_subparsers(title="commands")
    # Each subcommand gets its own parser, filled in by the matching
    # get_parser_* helper, and a `handle` callback stored via set_defaults.
    generate_parser = subparsers.add_parser(
        "generate", help="generate discrete layout data")
    generate_parser = get_parser_discrete(generate_parser)
    generate_parser.set_defaults(handle=handle_generate, parser=generate_parser)
    generate_c_parser = subparsers.add_parser(
        "generate_c", help="generate continuous layout data")
    generate_c_parser = get_parser_continuous(generate_c_parser)
    generate_c_parser.set_defaults(handle=handle_generate_c, parser=generate_c_parser)
    generate_c_power_parser = subparsers.add_parser(
        "generate_c_power",
        help="generate continuous layout data with sampling component powers",
    )
    generate_c_power_parser = get_parser_continuous_power(generate_c_power_parser)
    generate_c_power_parser.set_defaults(handle=handle_generate_c_power,
                                         parser=generate_c_power_parser)
    plot_parser = subparsers.add_parser("plot", help="plot layout data")
    plot_parser = get_plot_parser(plot_parser)
    plot_parser.set_defaults(handle=handle_plot)
    convert_parser = subparsers.add_parser("convert", help="convert layout data")
    convert_parser = get_convert_parser(convert_parser)
    convert_parser.set_defaults(handle=handle_convert)
    makeconfig_parser = subparsers.add_parser("makeconfig", help="make template config")
    makeconfig_parser = get_parser_makeconfig(makeconfig_parser)
    makeconfig_parser.set_defaults(handle=handle_makeconfig)
    options, _ = parser.parse_known_args()
    if hasattr(options, "test") and options.test:
        # Test mode only: print the resolved parameters and exit.
        print(parser.format_values())
        print(options)
        # print(sys.argv)
        parser.exit()
    if debug:
        del_parser(options)
        return parser, options
    # Dispatch to the selected subcommand (absent when no subcommand given).
    if hasattr(options, "handle"):
        options.handle(options)
    if options_flag:
        return options
query_constraints.append(col_constraint(i)) query = "FIND p[1, 1, 28, 28]\nS.T.\n" query += ",\n".join(query_constraints) query += "\nRETURN clamp(p + M_nine, 0, 1)" success, r, t = q.Query(query, context=context, args=args).run() return success, r, t parser = ArgumentParser(description='DL2 Querying', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser = dl2.add_default_parser_args(parser, query=True) parser.add("--instances", type=int, default=10, required=False, help="max number of instances to run per query") parser.add("--query", type=int, choices=[1, 2, 3], default=1, required=False, help="max number of instances to run per query") parser.add("-a", type=int, default=None, required=False, help="argument") parser.add("--plot", type=dl2.str2bool, default=False, required=False, help="argument") args = parser.parse_args()
def procesaArgumentos():
    """Parse CLI/environment options and return the resulting namespace."""
    p = ArgumentParser()
    # Mandatory inputs: data file, season and match day.
    p.add('-i', dest='infile', type=str, env_var='SM_INFILE', required=True)
    p.add('-t', dest='temporada', type=str, env_var='SM_TEMPORADA', required=True)
    p.add('-j', dest='jornada', type=int, required=True)
    # Member filters (repeatable include/exclude) and listing switch.
    p.add('-s', '--include-socio', dest='socioIn', type=str, action="append")
    p.add('-e', '--exclude-socio', dest='socioOut', type=str, action="append")
    p.add('-l', '--lista-socios', dest='listaSocios', action="store_true", default=False)
    # Storage locations.
    p.add("--data-dir", dest="datadir", type=str, default=LOCATIONCACHE)
    p.add("--solution-file", dest="solfile", type=str)
    # Diagnostics.
    p.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE', required=False, default=0)
    p.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG', required=False, default=False)
    p.add('--logdir', dest='logdir', type=str, env_var='SM_LOGDIR', required=False)
    # Any remaining positional arguments are collected verbatim.
    p.add('otherthings', nargs='*')
    return p.parse_args()
output = torch.transpose(x, self.dims[0], self.dims[1]) return output class LSTMAdapter(nn.Module): def __init__(self): super(LSTMAdapter, self).__init__() def forward(self, x): return x[0] parser = ArgumentParser(description='extractor.py') parser.add('-datafile', '--datafile', default='roto-ie.h5', help='path to hdf5 file containing train/val data') parser.add('-batchsize', '--batchsize', default=32, type=int, help='batch size') parser.add('-embed_size', '--embed_size', default=200, type=int, help='size of embeddings') parser.add('-num_filters', '--num_filters', default=200, type=int,
def procesaArgumentos():
    """Parse CLI/environment options for the solver run.

    Returns a Namespace that additionally carries ``clavesSeq``: the key
    search order derived from --keySearchOrder, or the SEQCLAVES default.
    """
    parser = ArgumentParser()
    parser.add('-i', dest='infile', type=str, env_var='SM_INFILE', required=True)
    parser.add('-t', dest='temporada', type=str, env_var='SM_TEMPORADA', required=True)
    parser.add('-j', dest='jornada', type=int, required=True)
    parser.add('-s', '--include-socio', dest='socioIn', type=str, action="append")
    parser.add('-e', '--exclude-socio', dest='socioOut', type=str, action="append")
    parser.add('-l', '--lista-socios', dest='listaSocios', action="store_true", default=False)
    # Parallelism / scheduling knobs.
    parser.add('-b', '--backend', dest='backend', choices=BACKENDCHOICES, default='joblib')
    parser.add('-x', '--scheduler', dest='scheduler', type=str, default='127.0.0.1')
    parser.add("-o", "--output-dir", dest="outputdir", type=str, default=LOCATIONCACHE)
    parser.add('-p', '--package', dest='package', type=str, action="append")
    parser.add('--keySearchOrder', dest='searchOrder', type=str)
    parser.add('--nproc', dest='nproc', type=int, default=NJOBS)
    parser.add('--memworker', dest='memworker', default=MEMWORKER)
    parser.add('--joblibmode', dest='joblibmode', choices=JOBLIBCHOICES, default='threads')
    parser.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE', required=False, default=0)
    parser.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG', required=False, default=False)
    parser.add('--logdir', dest='logdir', type=str, env_var='SM_LOGDIR', required=False)
    args = vars(parser.parse_args())
    # Bug fix: the 'searchOrder' key is ALWAYS present in the parsed-args dict
    # (value None when --keySearchOrder is not supplied), so the original test
    # `'searchOrder' in args` was always true: the SEQCLAVES default branch was
    # unreachable and keySearchOrderParameter(None) was called. Test the value.
    if args.get('searchOrder'):
        args['clavesSeq'] = keySearchOrderParameter(args['searchOrder'])
    else:
        args['clavesSeq'] = SEQCLAVES
    return Namespace(**args)
def get_arguments():
    """Parse CLI/config-file options for the network planning experiment.

    Returns the parsed argparse namespace covering scenario creation, demand
    history, topology, SLA, path selection and cost parameters.
    """
    parser = ArgumentParser('Argument Parser')
    parser.add('-c', '--config', is_config_file=True, help='Path to config file')
    parser.add('--path', type=str, default='.', help='Path to experiment')
    parser.add('--add_ducts', type=boolean_string, default=True,
               help='Should new fiber ducts be constructed?')
    parser.add(
        '--time_heuristic_fwd',
        type=boolean_string,
        default=True,
        help=
        'True if forward greedy heuristic to use when iterating over time, false if backwards heuristic'
    )
    # Cutoff argument
    parser.add('--cutoff', type=float, default=1e-3,
               help='cutoff for scenario creation')
    # Arguments for demand history
    parser.add(
        '--demand_scale',
        type=float,
        default=1.,
        help=
        'How should the stored demand be scaled. demand_scale * demand[demand_no] is the initial demand.'
    )
    parser.add('--timesteps', type=int, default=5,
               help='Number of timesteps to optimize for')
    parser.add('--demand_no', type=int, default=0,
               help='Demand file in TOPOLOGY\'s folder')
    # Topology arguments
    parser.add(
        '--topology',
        type=str,
        default='B4_000',
        help='Name of the topology that can be found in the topology folder')
    # SLA arguments
    parser.add('--alpha', type=float, default=0.999,
               help='Minimum availability for each flow')
    # Path arguments
    # (typo fix in user-facing help: "selction" -> "selection")
    parser.add(
        '--path_selection',
        type=str,
        default='KSP-4',
        help=
        'Path selection algorithm. KSP-4 -> KSP max 4 path, PST-6 paths shorter than 6'
    )
    # Cost relevant arguments
    parser.add('--wavelength_capacity', type=float, default=400,
               help='Number of Gbps per wavelength')
    parser.add('--n_wavelengths_fiber', type=float, default=64,
               help='Number of wavelengths per fiber')
    parser.add('--gbps_cost', type=float, default=10,
               help='Cost in $ per Gbps for transceivers')
    parser.add('--transceiver_amortization_years', type=float, default=3,
               help='Amortization years for transceivers')
    parser.add('--fiber_cost', type=float, default=3600,
               help='Cost in $ per year per fiber')
    parser.add('--n_fibers_per_fiberduct', type=float, default=200,
               help='Number of fibers built together')
    args = parser.parse_args()
    return args
def get_parser_common(parser: configargparse.ArgumentParser, config_name: str):
    """Register the options shared by all layout-generation subcommands.

    Points *parser* at the bundled default config file *config_name*
    (resolved relative to ``here``) and returns the parser for chaining.

    NOTE(review): this mutates configargparse private attributes
    (_config_file_parser, _default_config_files) to retrofit YAML-config
    support onto an already-constructed parser — fragile across
    configargparse versions.
    """
    config_path = here / config_name
    assert config_path.exists(), "Config do not exist!"
    parser._config_file_parser = configargparse.YAMLConfigFileParser()
    parser._default_config_files = [str(config_path)]
    parser.add("--config", is_config_file=True, help="config file path")
    parser.add("--test", action="store_true", help="test mode")
    parser.add("--length", type=float, help="board length")
    parser.add("--length_unit", type=float, help="unit length")
    parser.add(
        "--bcs",
        type=yaml.safe_load,
        action="append",
        help="Dirichlet boundaries",
    )
    parser.add("--data_dir", type=str, help="dir to store generated layout data")
    parser.add("--fem_degree", type=int, help="fem degree in fenics")
    parser.add("--u_D", type=int, help="value on Dirichlet boundary")
    parser.add("--nx", type=int, help="number of grid in x direction")
    # parser.add('--ny', type=int, help='number of grid in y direction')
    # parser.add('--nz', type=int, help='number of grid in z direction')
    parser.add("--sample_n", type=int, help="number of samples")
    # NOTE: the random default is drawn once per get_parser_common call,
    # so each built parser carries its own fresh default seed.
    parser.add(
        "--seed",
        type=int,
        default=np.random.randint(2**32),
        help="seed in np.random module",
    )
    parser.add("--file_format", type=str, choices=["mat"], help="dataset file format")
    parser.add("--prefix", type=str, help="prefix of file")
    parser.add(
        "--method",
        type=str,
        choices=["fenics"],
        help="method to solve the equation",
    )
    parser.add("--worker", type=int, default=os.cpu_count(), help="number of workers")
    parser.add("--ndim", type=int, choices=[2, 3], help="dimension")
    parser.add("--vtk", action="store_true", default=False, help="output vtk file")
    parser.add(
        "-V",
        "--version",
        action="version",
        version=f"layout-generator version: {__version__}",
    )
    parser.add("--task", help="task", choices=["discrete", "continuous"], type=str)
    return parser
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from configargparse import ArgumentParser from mechanicalsoup import StatefulBrowser from SMACB.CalendarioACB import BuscaCalendario, CalendarioACB from SMACB.PartidoACB import PartidoACB from Utils.Web import ExtraeGetParams if __name__ == '__main__': parser = ArgumentParser() parser.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE', required=False, default=0) parser.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG', required=False, default=False) parser.add('-j', dest='justone', action="store_true", env_var='SM_JUSTONE', required=False, default=False)
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from configargparse import ArgumentParser from SMACB.Constants import OtherLoc # from SMACB.MercadoPage import MercadoPageContent from SMACB.SuperManager import SuperManagerACB from SMACB.TemporadaACB import TemporadaACB # from Utils.Misc import ReadFile if __name__ == '__main__': parser = ArgumentParser() parser.add('-i', dest='sminfile', type=str, env_var='SM_INFILE', required=False) parser.add('-o', dest='smoutfile', type=str, env_var='SM_OUTFILE', required=False) parser.add('-t', dest='tempin', type=str, env_var='SM_TEMPIN', required=False) parser.add('-x', dest='tempout', type=str, env_var='SM_TEMPOUT', required=False) parser.add_argument(dest='files', type=str, nargs='*') args = parser.parse_args() sm = SuperManagerACB() if 'sminfile' in args and args.sminfile: sm.loadData(args.sminfile) temporada = None if 'tempin' in args and args.tempin: temporada = TemporadaACB()
def add_arg(parser: ArgumentParser):
    # NOTE(review): add_arg and from_spec read as methods of a model class
    # (MyModel, used in the __main__ block below) whose `class` header is
    # outside this chunk — confirm the original indentation/ownership.
    """Register the model's hyperparameter flags in their own argument group."""
    g = parser.add_argument_group("My Model config")
    g.add_argument("--num_layers", type=int, help="number of layers")
    g.add_argument("--hidden_size", type=int, help="size of hidden layers")
    # No separate Config class is needed; everything is handled through the
    # keys added to the parser.

@classmethod
def from_spec(cls, spec: Namespace):
    """Build an instance from a parsed-arguments namespace."""
    args = spec.num_layers, spec.hidden_size
    return cls(*args)

if __name__ == "__main__":
    parser = ArgumentParser(description="What does this file do?")
    parser.add_argument("-e", "--epoch", type=int, help="epochs to train the model")
    # model config
    parser.add('-c', '--config', required=True, is_config_file=True, help='config file path')
    MyModel.add_arg(parser)
    # parse_known_args tolerates extra flags consumed elsewhere.
    args, rest = parser.parse_known_args()
    model = MyModel.from_spec(args)
    print(args)
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import sys from configargparse import ArgumentParser from mechanicalsoup import StatefulBrowser from SMACB.CalendarioACB import calendario_URLBASE from SMACB.TemporadaACB import TemporadaACB from Utils.Web import ExtraeGetParams parser = ArgumentParser() parser.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE', required=False, help='', default=0) parser.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG', required=False, help='', default=False) parser.add('-j', dest='justone', action="store_true", env_var='SM_JUSTONE', required=False,
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from configargparse import ArgumentParser from SMACB.TemporadaACB import TemporadaACB # from Utils.Misc import ReadFile if __name__ == '__main__': parser = ArgumentParser() parser.add('-t', dest='tempin', type=str, env_var='SM_TEMPIN', required=True) parser.add('-x', dest='tempout', type=str, env_var='SM_TEMPOUT', required=False) parser.add_argument(dest='files', type=str, nargs='*') args = parser.parse_args() temporada = None if 'tempin' in args and args.tempin: temporada = TemporadaACB() temporada.cargaTemporada(args.tempin)
def add_parser_args(parser: ArgumentParser) -> None:
    """Register every Checkov CLI flag on *parser* (mutates it in place)."""
    parser.add('-v', '--version', help='version', action='version', version=version)
    # Scan targets: directories or individual files (mutually exclusive by
    # convention, as the help texts state).
    parser.add(
        '-d', '--directory', action='append',
        help='IaC root directory (can not be used together with --file).')
    parser.add('--add-check', action='store_true',
               help="Generate a new check via CLI prompt")
    parser.add('-f', '--file', action='append',
               help='IaC file(can not be used together with --directory)')
    parser.add(
        '--skip-path', action='append',
        help=
        'Path (file or directory) to skip, using regular expression logic, relative to current '
        'working directory. Word boundaries are not implicit; i.e., specifying "dir1" will skip any '
        'directory or subdirectory named "dir1". Ignored with -f. Can be specified multiple times.'
    )
    # External custom checks (local directory or git URL).
    parser.add(
        '--external-checks-dir', action='append',
        help='Directory for custom checks to be loaded. Can be repeated')
    parser.add(
        '--external-checks-git', action='append',
        help=
        'Github url of external checks to be added. \n you can specify a subdirectory after a '
        'double-slash //. \n cannot be used together with --external-checks-dir'
    )
    parser.add('-l', '--list', help='List checks', action='store_true')
    # Output configuration.
    parser.add(
        '-o', '--output', action='append', choices=OUTPUT_CHOICES, default=None,
        help=
        'Report output format. Add multiple outputs by using the flag multiple times (-o sarif -o cli)'
    )
    parser.add(
        '--output-file-path', default=None,
        help=
        'Name for output file. The first selected output via output flag will be saved to the file (default output is cli)'
    )
    parser.add(
        '--output-bc-ids', action='store_true',
        help=
        'Print Bridgecrew platform IDs (BC...) instead of Checkov IDs (CKV...), if the check exists in the platform'
    )
    parser.add(
        '--no-guide', action='store_true', default=False,
        help=
        'Do not fetch Bridgecrew platform IDs and guidelines for the checkov output report. Note: this '
        'prevents Bridgecrew platform check IDs from being used anywhere in the CLI.'
    )
    parser.add('--quiet', action='store_true', default=False,
               help='in case of CLI output, display only failed checks')
    parser.add('--compact', action='store_true', default=False,
               help='in case of CLI output, do not display code blocks')
    # Framework selection / filtering.
    parser.add(
        '--framework',
        help=
        'filter scan to run only on specific infrastructure code frameworks',
        choices=checkov_runners + ["all"], default=['all'], nargs="+")
    parser.add(
        '--skip-framework',
        help='filter scan to skip specific infrastructure code frameworks. \n'
        'will be included automatically for some frameworks if system dependencies '
        'are missing.',
        choices=checkov_runners, default=None, nargs="+")
    # Check allowlist / denylist.
    parser.add(
        '-c', '--check',
        help=
        'filter scan to run only on a specific check identifier(allowlist), You can '
        'specify multiple checks separated by comma delimiter',
        action='append', default=None, env_var='CKV_CHECK')
    parser.add(
        '--skip-check',
        help=
        'filter scan to run on all check but a specific check identifier(denylist), You can '
        'specify multiple checks separated by comma delimiter',
        action='append', default=None, env_var='CKV_SKIP_CHECK')
    parser.add(
        '--run-all-external-checks', action='store_true',
        help=
        'Run all external checks (loaded via --external-checks options) even if the checks are not present '
        'in the --check list. This allows you to always ensure that new checks present in the external '
        'source are used. If an external check is included in --skip-check, it will still be skipped.'
    )
    # Bridgecrew / Prisma Cloud platform integration.
    parser.add('--bc-api-key', env_var='BC_API_KEY', sanitize=True,
               help='Bridgecrew API key')
    parser.add(
        '--prisma-api-url', env_var='PRISMA_API_URL', default=None,
        help=
        'The Prisma Cloud API URL (see: https://prisma.pan.dev/api/cloud/api-urls). '
        'Requires --bc-api-key to be a Prisma Cloud Access Key in the following format: <access_key_id>::<secret_key>'
    )
    parser.add(
        '--docker-image',
        help='Scan docker images by name or ID. Only works with --bc-api-key flag')
    parser.add('--dockerfile-path',
               help='Path to the Dockerfile of the scanned docker image')
    parser.add(
        '--repo-id',
        help=
        'Identity string of the repository, with form <repo_owner>/<repo_name>'
    )
    parser.add(
        '-b', '--branch',
        help=
        "Selected branch of the persisted repository. Only has effect when using the --bc-api-key flag",
        default='master')
    parser.add(
        '--skip-fixes',
        help=
        'Do not download fixed resource templates from Bridgecrew. Only has effect when using the '
        '--bc-api-key flag',
        action='store_true')
    parser.add(
        '--skip-suppressions',
        help=
        'Do not download preconfigured suppressions from the Bridgecrew platform. Code comment '
        'suppressions will still be honored. '
        'Only has effect when using the --bc-api-key flag',
        action='store_true')
    parser.add(
        '--skip-policy-download',
        help=
        'Do not download custom policies configured in the Bridgecrew platform. '
        'Only has effect when using the --bc-api-key flag',
        action='store_true')
    # Terraform-specific options.
    parser.add(
        '--download-external-modules',
        help=
        "download external terraform modules from public git repositories and terraform registry",
        default=os.environ.get('DOWNLOAD_EXTERNAL_MODULES', False),
        env_var='DOWNLOAD_EXTERNAL_MODULES')
    parser.add(
        '--var-file', action='append',
        help='Variable files to load in addition to the default files (see '
        'https://www.terraform.io/docs/language/values/variables.html#variable-definitions-tfvars-files).'
        'Currently only supported for source Terraform (.tf file), and Helm chart scans.'
        'Requires using --directory, not --file.')
    parser.add('--external-modules-download-path',
               help="set the path for the download external terraform modules",
               default=DEFAULT_EXTERNAL_MODULES_DIR,
               env_var='EXTERNAL_MODULES_DIR')
    parser.add('--evaluate-variables',
               help="evaluate the values of variables and locals",
               default=True)
    parser.add('-ca', '--ca-certificate',
               help='Custom CA certificate (bundle) file',
               default=None, env_var='BC_CA_BUNDLE')
    parser.add(
        '--repo-root-for-plan-enrichment',
        help=
        'Directory containing the hcl code used to generate a given plan file. Use with -f.',
        dest="repo_root_for_plan_enrichment", action='append')
    # Config-file handling (read, write out, show resolved values).
    parser.add('--config-file',
               help='path to the Checkov configuration YAML file',
               is_config_file=True, default=None)
    parser.add(
        '--create-config',
        help=
        'takes the current command line args and writes them out to a config file at '
        'the given path',
        is_write_out_config_file_arg=True, default=None)
    parser.add(
        '--show-config',
        help='prints all args and config settings and where they came from '
        '(eg. commandline, config file, environment variable or default)',
        action='store_true', default=None)
    # Baseline support.
    parser.add(
        '--create-baseline',
        help=
        'Alongside outputting the findings, save all results to .checkov.baseline file'
        ' so future runs will not re-flag the same noise. Works only with `--directory` flag',
        action='store_true', default=False)
    parser.add(
        '--baseline',
        help=
        ("Use a .checkov.baseline file to compare current results with a known baseline. "
         "Report will include only failed checks that are new with respect to the provided baseline"
         ),
        default=None,
    )
    # CVE / package scanning filters.
    parser.add(
        '--min-cve-severity',
        help=
        'Set minimum severity that will cause returning non-zero exit code',
        choices=SEVERITY_RANKING.keys(), default='none')
    parser.add(
        '--skip-cve-package',
        help=
        'filter scan to run on all packages but a specific package identifier (denylist), You can '
        'specify this argument multiple times to skip multiple packages',
        action='append', default=None)
    # Add mutually exclusive groups of arguments
    exit_code_group = parser.add_mutually_exclusive_group()
    exit_code_group.add('-s', '--soft-fail',
                        help='Runs checks but suppresses error code',
                        action='store_true')
    exit_code_group.add(
        '--soft-fail-on',
        help='Exits with a 0 exit code for specified checks. You can specify '
        'multiple checks separated by comma delimiter',
        action='append', default=None)
    exit_code_group.add(
        '--hard-fail-on',
        help=
        'Exits with a non-zero exit code for specified checks. You can specify '
        'multiple checks separated by comma delimiter',
        action='append', default=None)
def eval():
    """Evaluate the i12 classifier against a list of labelled .npy files.

    For each input file the per-label pixel metrics are cached as a YAML file
    inside ``--output`` (recomputed only when the cache file is missing),
    visualisation JPEGs are saved, and cumulative precision/recall/F1 figures
    are printed as the files are processed.
    """
    # NOTE(review): this function shadows the builtin ``eval``; name kept so
    # existing callers / console entry points keep working.
    p = ArgumentParser(default_config_files=[join(dirname(__file__), '.eval.cfg')])
    p.add('--config', '-c', is_config_file=True, help="config file path")
    p.add('--weights', type=str, help="path to the classifier weights")
    p.add('--data', type=str, help="test data, a file with each line as an .npy filename")
    p.add('--output', '-o', type=str, help="folder to hold outputs", default='.')
    p.add('--path', type=str, action='append', help='search paths for files')
    args = p.parse_args()

    # Make sure the output folder exists; ignore the error when it already does.
    try:
        os.makedirs(args.output)
    except OSError:
        pass

    # If weights is None, this will default to the weights in the models folder,
    # or the ones indicated by the environment variable
    net = i12.net(args.weights)

    # FIX: close the listing file deterministically (was a leaked open()).
    with open(args.data) as listing:
        files = [complete(line, args.path) for line in listing.readlines()]

    # Running totals per label, accumulated across all evaluated files.
    summary = OrderedDict()
    for label in i12.LABELS:
        summary[label] = OrderedDict()
        summary[label].update(TP=0)
        summary[label].update(TN=0)
        summary[label].update(FP=0)
        summary[label].update(FN=0)
        summary[label].update(targets=0)
        summary[label].update(candidates=0)
        summary[label].update(hits=0)

    for file_index, file in enumerate(files):
        print(file_index + 1, "of", len(files), ":", file)
        cached_file = os.path.join(args.output, "metrics-{:04}.yml".format(file_index))

        # Assumed layout (from the slicing below — confirm with the data
        # producer): channels 0-2 are RGB, the remaining channels are
        # per-label expected masks.
        data = np.load(file)
        rgb = channels_last(data[:3].astype(np.uint8))
        if not os.path.isfile(os.path.join(args.output, 'file-{}.jpg'.format(file_index))):
            skimage.io.imsave(os.path.join(args.output, 'file-{}.jpg'.format(file_index)), rgb)
        all_expected = data[3:]
        if not os.path.isfile(os.path.join(args.output, 'file-{}-windows.jpg'.format(file_index))):
            skimage.io.imsave(
                os.path.join(args.output, 'file-{}-windows.jpg'.format(file_index)),
                skimage.color.gray2rgb(
                    (all_expected[pyfacades.models.independant_12_layers.model.WINDOW] == 2).astype(float)))

        # Only run the (expensive) network when no cached metrics exist.
        if not os.path.isfile(cached_file):
            print("Calculating metrics for file", file)
            all_predicted = i12.process_strip(channels_first(rgb))
            results = OrderedDict()
            for label_index, label in enumerate(i12.LABELS):
                if label == 'background':
                    continue
                # Skip the top row and the bottom row -- I gave those dummy
                # labels to work around a caffe error
                expected = all_expected[label_index][1:-1]
                # Uggh! Since the 'UNKNOWN' label is removed, argmax gives the
                # wrong values; I need 0->0, 1->2, 2->3
                predicted = all_predicted.features[label_index].argmax(0)[1:-1] + 1
                predicted[predicted == LABEL_UNKNOWN] = LABEL_NEGATIVE
                metrics = Metrics(expected, predicted, source=file, feature=label, threshold=0.5)
                results[label] = metrics.as_dict()
                viz = viz_pixel_labels(expected, predicted, rgb[1:-1],
                                       label_negative=LABEL_NEGATIVE,
                                       label_positive=LABEL_POSITIVE)
                skimage.io.imsave(os.path.join(args.output, 'file-{}-viz-{}.jpg'.format(file_index, label)), viz)
            with open(cached_file, 'w') as f:
                yaml.dump(results, f, Dumper=yamlordereddictloader.Dumper)

        # FIX: was ``yaml.load(open(cached_file))`` — leaked the file handle and
        # relied on the deprecated implicit loader. Use the loader matching the
        # Dumper used above so the cached mappings round-trip as OrderedDicts.
        with open(cached_file) as f:
            cached = yaml.load(f, Loader=yamlordereddictloader.Loader)

        print("Cumulative:")
        for label in i12.LABELS:
            if label == 'background':
                continue
            metrics = Metrics(**cached[label])
            assert metrics.source == file
            summary[label]['TP'] += metrics.TP
            summary[label]['FP'] += metrics.FP
            summary[label]['TN'] += metrics.TN
            summary[label]['FN'] += metrics.FN
            summary[label]['targets'] += metrics.targets
            summary[label]['candidates'] += metrics.candidates
            summary[label]['hits'] += metrics.hits
            cum = Metrics(**summary[label])
            print("{:10}: pix(P:{:2.5f}, R:{:2.5f},F:{:2.5f}), obj:(P:{:2.5f}, R:{:2.5f},F:{:2.5f})".format(
                label, cum.pixel_precision, cum.pixel_recall, cum.pixel_f1,
                cum.object_precision, cum.object_recall, cum.object_f1))
def procesaArgumentos():
    """Build the command-line/environment parser and return the parsed args.

    Several options may also be supplied through ``SM_*`` environment
    variables (configargparse ``env_var``).
    """
    args_parser = ArgumentParser()

    # Mandatory inputs.
    args_parser.add('-i', dest='infile', type=str, env_var='SM_INFILE', required=True)
    args_parser.add('-t', dest='temporada', type=str, env_var='SM_TEMPORADA', required=True)
    args_parser.add('-j', dest='jornada', type=int, required=True)

    # Optional behaviour switches.
    args_parser.add('-l', '--lista-socios', dest='listaSocios', action="store_true", default=False)
    args_parser.add("-o", "--output-dir", dest="outputdir", type=str, default=LOCATIONCACHE)
    args_parser.add('--nproc', dest='nproc', type=int, default=NJOBS)
    args_parser.add('--memworker', dest='memworker', default=MEMWORKER)
    args_parser.add('--joblibmode', dest='joblibmode', choices=JOBLIBCHOICES, default='threads')

    # Diagnostics / logging.
    args_parser.add('-v', dest='verbose', action="count", env_var='SM_VERBOSE', required=False, default=0)
    args_parser.add('-d', dest='debug', action="store_true", env_var='SM_DEBUG', required=False, default=False)
    args_parser.add('--logdir', dest='logdir', type=str, env_var='SM_LOGDIR', required=False)

    return args_parser.parse_args()
def listaMercados(sm): mercadoDatos = {x: "" for x in sm.mercado} for j in sm.mercadoJornada: mercadoDatos[sm.mercadoJornada[j]] = "<- J%i" % (j) listaMercados = list(mercadoDatos.keys()) listaMercados.sort() for j in listaMercados: print(" %s %s" % (j, mercadoDatos[j])) if __name__ == '__main__': parser = ArgumentParser() parser.add('-i', dest='infile', type=str, env_var='SM_INFILE', required=False) parser.add('-o', dest='outfile', type=str, env_var='SM_OUTFILE', required=False) parser.add('-t', dest='temporada', type=str, env_var='SM_TEMPORADA', required=False) parser.add('-j', dest='jornada', action='append', required=False) parser.add('-l', dest='list',
# --- DL2 querying: script-level setup (imports, CLI parsing, seeding) ---
from context import get_context
from evaluation_queries import get_queries
from configargparse import ArgumentParser
import argparse
import torch
import signal
import time
import random
import numpy as np

# Build the CLI parser; dl2 layers its default options (including --cuda,
# read below) on top of the query-specific flags added here.
# NOTE(review): ``dl2`` is used but its import is not visible in this chunk —
# presumably imported elsewhere in the file; confirm.
parser = ArgumentParser(description='DL2 Querying',
                        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = dl2.add_default_parser_args(parser, query=True)
parser.add("--instances", type=int, default=10, required=False,
           help="max number of instances to run per query")
parser.add("--glob-pattern", type=str, default="*.tdql", required=False,
           help="pattern to glob for tdql files in ./evaluation_queries")
parser.add('--dataset',
           choices=['MNIST', 'FASHION_MNIST', 'CIFAR', 'GTSRB', 'IMAGENT'],
           default=['MNIST', 'FASHION_MNIST', 'CIFAR', 'GTSRB', 'IMAGENT'],
           nargs='+',
           help='datasets to use')
args = parser.parse_args()

# Use CUDA only when it was both requested and is actually available.
# NOTE(review): args.cuda is presumably added by dl2.add_default_parser_args —
# confirm against that helper.
args.cuda = args.cuda and torch.cuda.is_available()

# Fixed seed for reproducible sampling.
random.seed(42)