def _parse_args():
    """Build and parse the command-line arguments for running Vega.

    Returns the parsed ``argparse.Namespace`` with pipeline config file,
    backend/device selection, resume options and config-override options.
    """
    parser = argment_parser("Run Vega")
    parser.add_argument("config_file", default=None, type=str,
                        help="Pipeline config file name")
    # Backend/device selection; CLI beats config file beats defaults.
    backend_grp = parser.add_argument_group(
        title="set backend and device, priority: specified in the command line > "
              "specified in the configuration file > default settings(pytorch and GPU)")
    backend_grp.add_argument("-b", "--backend", default=None, type=str,
                             choices=["pytorch", "p", "tensorflow", "t", "mindspore", "m"],
                             help="set training platform")
    backend_grp.add_argument("-d", "--device", default=None, type=str,
                             choices=["GPU", "NPU"],
                             help="set training device")
    # Resuming an interrupted task.
    resume_grp = parser.add_argument_group(title="Resume not finished task")
    resume_grp.add_argument("-r", "--resume", action='store_true',
                            help="resume not finished task")
    resume_grp.add_argument("-t", "--task_id", default=None, type=str,
                            help="specify the ID of the task to be resumed")
    # Ad-hoc overrides applied on top of the YAML config.
    override_grp = parser.add_argument_group(title='Modify config for yml')
    override_grp.add_argument("-m", "--modify", action='store_true',
                              help="modify some config")
    override_grp.add_argument("-dt", "--dataset", default=None, type=str,
                              help='modify dataset for all pipe_step')
    override_grp.add_argument("-dp", "--data_path", default=None, type=str,
                              help="modify data_path for all pipe_step")
    override_grp.add_argument("-bs", "--batch_size", default=None, type=str,
                              help='modify batch_size of dataset for all pipe_step')
    override_grp.add_argument("-es", "--epochs", default=None, type=str,
                              help='modify fully_train epochs')
    return parser.parse_args()
def _parse_args(desc):
    """Parse the arguments used to stop Vega processes.

    Exactly one of --pid / --task_id / --all / --force must be supplied
    (enforced via a required mutually-exclusive group).
    """
    parser = argment_parser(desc)
    excl = parser.add_mutually_exclusive_group(required=True)
    excl.add_argument("-p", "--pid", type=int,
                      help="kill Vega main process based on the specified process ID")
    excl.add_argument("-t", "--task_id", type=str,
                      help="kill Vega main process based on the specified Vega application task ID")
    excl.add_argument("-a", "--all", action='store_true',
                      help="kill all Vega main process")
    excl.add_argument("-f", "--force", action='store_true',
                      help="Forcibly kill all Vega-related processes even if the main process does not exist")
    return parser.parse_args()
def _parse_args():
    """Parse the arguments needed to verify a Vega cluster setup.

    Master IP, slave IPs and the shared NFS folder are all required;
    --json switches the output to machine-readable JSON.
    """
    parser = argment_parser("Verify cluster.")
    parser.add_argument("-m", "--master", default=None, type=str, required=True,
                        help="master node IP")
    parser.add_argument("-s", "--slaves", dest="slaves", nargs="+", required=True,
                        help="slaves node IP, eg. -s 192.168.0.2 192.168.0.3")
    parser.add_argument("-n", "--nfs_folder", default=None, type=str, required=True,
                        help="shared NFS folder")
    parser.add_argument("-j", "--json", action='store_true',
                        help="silence mode, print result with json format")
    return parser.parse_args()
def _parse_args(desc):
    """Parse arguments for a command that only offers a JSON-output switch."""
    parser = argment_parser(desc)
    parser.add_argument("-j", "--json", action='store_true',
                        help="return json format string")
    return parser.parse_args()
def _parse_args(desc):
    """Parse the arguments identifying a running Vega application.

    Both the task id and the application root path are mandatory.
    """
    parser = argment_parser(desc)
    parser.add_argument("-t", "--task_id", type=str, required=True,
                        help="vega application task id")
    parser.add_argument("-r", "--root_path", type=str, required=True,
                        help="root path where vega application is running")
    return parser.parse_args()
def _parse_args():
    """Parse the arguments for generating a Class Activation Map (CAM).

    All four inputs are required: the source image, the destination image,
    the model description file and the model weights file.
    """
    parser = argment_parser("Generate CAM(Class Activation Map) file.")
    parser.add_argument("-i", "--input_image_file", required=True, type=str,
                        help="Input image file.")
    parser.add_argument("-o", "--output_image_file", required=True, type=str,
                        help="Output image file.")
    parser.add_argument("-d", "--model_desc_file", required=True, type=str,
                        help="Model description file.")
    parser.add_argument("-w", "--model_weights_file", required=True, type=str,
                        help="Model weights file(.pth).")
    return parser.parse_args()
def _parse_args():
    """Build and parse the command-line arguments for fine-tuning a DNet or ResNet model.

    Returns the parsed ``argparse.Namespace`` covering backend/device,
    dataset, trainer, model and output settings.
    """
    parser = argment_parser("Fine tune DNet model or ResNet model.")
    group_backend = parser.add_argument_group(
        title="Set backend and device, default is pytorch and GPU")
    group_backend.add_argument(
        "-b", "--backend", default="pytorch", type=str,
        choices=["pytorch", "p", "tensorflow", "t", "mindspore", "m"],
        help="set training platform")
    group_backend.add_argument("-d", "--device", default="GPU", type=str,
                               choices=["GPU", "NPU"],
                               help="set training device")
    group_dataset = parser.add_argument_group(title="Dataset setting")
    group_dataset.add_argument(
        "-ds", "--dataset", default=None, type=str, required=True,
        help="dataset type, eg. Cifar10, ClassificationDataset.")
    group_dataset.add_argument("-dp", "--data_path", default=None, type=str,
                               required=True, help="dataset path.")
    group_dataset.add_argument("-bs", "--batch_size", default=None, type=int,
                               required=True, help="dataset batch size.")
    group_dataset.add_argument("-tp", "--train_portion", default=1.0, type=float,
                               help="train portion.")
    group_dataset.add_argument("-is", "--image_size", default=224, type=int,
                               help="image size.")
    group_trainer = parser.add_argument_group(title="Trainer setting")
    group_trainer.add_argument("-e", "--epochs", default=40, type=int,
                               help="Modify fully_train epochs")
    group_model = parser.add_argument_group(title="model setting")
    group_model.add_argument("-n", "--network", default=None, type=str,
                             choices=["dnet", "resnet"],
                             help="network name, dnet or resnet.")
    # dnet-specific option
    group_model.add_argument("-de", "--dnet_encoding", default=None, type=str,
                             help="DNet network Encoding")
    # resnet-specific option
    group_model.add_argument("-rd", "--resnet_depth", default=50, type=int,
                             help="ResNet network depth")
    # options common to both networks
    group_model.add_argument("-mf", "--pretrained_model_file", default=None,
                             type=str, required=True,
                             help="pretrained model file")
    group_model.add_argument("-nc", "--num_classes", default=None, type=int,
                             required=True, help="number of classes")
    group_output = parser.add_argument_group(title="output setting")
    # Fixed: output_path is a filesystem path, so it must be parsed as a
    # string; the original declared type=int, which would reject any real path.
    group_output.add_argument("-o", "--output_path", default=None, type=str,
                              help="set output path")
    args = parser.parse_args()
    return args
def parse_args_parser():
    """Parse parameters for Vega inference.

    Returns the parsed ``argparse.Namespace`` describing the model
    (description + weights), data format, data path, backend/device
    and the optional output file.
    """
    parser = argment_parser('Vega Inference.')
    parser.add_argument(
        "-c", "--model_desc", default=None, type=str, required=True,
        help="model description file, generally in json format, contains 'module' node.")
    # Fixed help-text typo: checkpoint files end with "ckpt", not "ckpl".
    parser.add_argument(
        "-m", "--model", default=None, type=str, required=True,
        help="model weight file, usually ends with pth, ckpt, etc.")
    parser.add_argument(
        "-df", "--data_format", default="classification", type=str,
        choices=[
            "classification", "c", "super_resolution", "s", "segmentation",
            "g", "detection", "d"
        ],
        help="data type, "
             "classification: some pictures file in a folder, "
             "super_resolution: some low resolution picture in a folder, "
             "segmentation: , "
             "detection: . "
             "'classification' is default")
    parser.add_argument(
        "-dp", "--data_path", default=None, type=str, required=True,
        help="the folder where the file to be inferred is located.")
    parser.add_argument("-b", "--backend", default="pytorch", type=str,
                        choices=["pytorch", "tensorflow", "mindspore"],
                        help="set training platform")
    parser.add_argument("-d", "--device", default="GPU", type=str,
                        choices=["CPU", "GPU", "NPU"],
                        help="set training device")
    parser.add_argument("-o", "--output_file", default=None, type=str,
                        help="output file. "
                             "classification: ./result.csv, "
                             "super_resolution: ./result.pkl, "
                             "segmentation: ./result.pkl, "
                             "detection: ./result.pkl ")
    args = parser.parse_args()
    return args
def _parse_args(sections, desc):
    """Parse dotted-key CLI arguments and fold them into a config tree.

    Which argument groups are registered depends on the ``sections``
    collection ("cluster", "model", "trainer", "fine_tune"). Options left
    at a falsy value are dropped before the tree is built.
    """
    parser = argment_parser(desc)
    parser.add_argument("-backend", "--general.backend", default="pytorch",
                        type=str, help="pytorch|tensorflow|mindspore")
    if "cluster" in sections:
        parser.add_argument("-devices_per_trainer",
                            "--general.worker.devices_per_trainer",
                            default=None, type=int)
        parser.add_argument("-master_ip", "--general.cluster.master_ip",
                            default=None, type=str)
        parser.add_argument("-slaves", "--general.cluster.slaves", default=[],
                            action='store', dest='general.cluster.slaves',
                            type=str, nargs='*', help="slave IP list")
    parser.add_argument("-dataset", "--dataset.type", required=True, type=str,
                        help="dataset name.")
    parser.add_argument("-data_path", "--dataset.common.data_path", type=str,
                        help="dataset path.")
    parser.add_argument("-batch_size", "--dataset.common.batch_size",
                        default=256, type=int)
    if "model" in sections:
        parser.add_argument("-model_desc", "--model.model_desc", type=str)
        parser.add_argument("-model_file", "--model.pretrained_model_file",
                            type=str)
    if "trainer" in sections:
        parser.add_argument("-epochs", "--trainer.epochs", type=int)
    if "fine_tune" in sections:
        parser.add_argument(
            "-task_type", "--task_type", default="classification", type=str,
            help="classification|detection|segmentation|super_resolution")
        parser.add_argument("-num_classes", "--trainer.num_classes", type=int)
    parser.add_argument(
        "-evaluator", "--evaluator", default=[], action='store',
        dest='evaluator', type=str, nargs='*',
        help="evaluator list, eg. -evaluator HostEvaluator DeviceEvaluator")
    parsed = vars(parser.parse_args())
    # Discard options the user did not set (falsy values), then expand
    # the dotted keys into a nested configuration tree.
    populated = {name: val for name, val in parsed.items() if val}
    return Config(build_tree(populated))