# NOTE(review): this line begins mid-statement (the opening
# `parser.add_argument('--model',` is above this chunk) and appears to be a
# stale pre-rename copy of the option block that follows: it registers
# underscore-style flags (--input_size, --model_config, --device_ids) that are
# re-registered below with dash-style names, and its --dtype help says
# "(default: half)" while default='float'. If both copies execute, argparse
# raises ArgumentError on the duplicate options — presumably this fragment is a
# merge leftover that should be removed; confirm against the full file.
'-a', metavar='MODEL', default='alexnet', choices=model_names, help='model architecture: ' + ' | '.join(model_names) + ' (default: alexnet)') parser.add_argument('--input_size', type=int, default=None, help='image input size') parser.add_argument('--model_config', default='', help='additional architecture configuration') parser.add_argument('--dtype', default='float', help='type of tensor: ' + ' | '.join(torch_dtypes.keys()) + ' (default: half)') parser.add_argument('--device', default='cuda', help='device assignment ("cpu" or "cuda")') parser.add_argument('--device_ids', default=[0], type=int, nargs='+', help='device ids assignment (e.g 0 1 2 3') parser.add_argument('-j', '--workers', default=8, type=int, metavar='N', help='number of data loading workers (default: 8)')
# Command-line options: dataset/model selection, device placement, and
# distributed-training setup. Relies on `parser`, `model_names`, and
# `torch_dtypes` being defined earlier in the file.
parser.add_argument('--datasets-dir', metavar='DATASETS_DIR', default='~/Datasets',
                    help='datasets dir')
parser.add_argument('--dataset', metavar='DATASET', default='imagenet',
                    help='dataset name or folder')
parser.add_argument('--model', '-a', metavar='MODEL', default='alexnet',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                         ' (default: alexnet)')
parser.add_argument('--input-size', type=int, default=None,
                    help='image input size')
parser.add_argument('--model-config', default='',
                    help='additional architecture configuration')
parser.add_argument('--dtype', default='float',
                    help='type of tensor: ' + ' | '.join(torch_dtypes.keys()) +
                         ' (default: float)')
parser.add_argument('--device', default='cuda',
                    help='device assignment ("cpu" or "cuda")')
# Fixed: help text was missing its closing parenthesis.
parser.add_argument('--device-ids', default=[0], type=int, nargs='+',
                    help='device ids assignment (e.g 0 1 2 3)')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of distributed processes')
# Kept with an underscore: torch.distributed.launch passes `--local_rank`
# in exactly this spelling.
parser.add_argument('--local_rank', default=-1, type=int,
                    help='rank of distributed processes')
parser.add_argument('--dist-init', default='env://', type=str,
                    help='init used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('-j', '--workers', default=8, type=int, metavar='N',
                    help='number of data loading workers (default: 8)')