def initialize(self):
    """Register data-preprocessing options on top of the base options.

    Covers class-balance augmentation, border cropping/padding, lesion
    cropping and dev-set carving.
    """
    # experiment specifics
    BaseOptions.initialize(self)

    def _str2bool(value):
        # FIX: argparse's type=bool maps any non-empty string (even "False")
        # to True; parse common boolean spellings explicitly instead.
        return str(value).lower() in ('true', '1', 'yes', 'y')

    self.argument_parser.add_argument(
        '--dataBalance', type=int,
        help='expand all data from different classed to the same scale with augument')
    self.argument_parser.add_argument(
        '--borderCropRate', type=float, default=0.5,
        help='trim images border at the given rate')
    self.argument_parser.add_argument(
        '--padBorderSize', type=int, default=500,
        help='rescale images to the given resolution')
    self.argument_parser.add_argument(
        '--massCrop', type=_str2bool,
        help='use opencv to crop up the lesion without the background')
    self.argument_parser.add_argument(
        '--off', type=_str2bool,
        help='do nothing but move images and label from row path to processed path')
    self.argument_parser.add_argument(
        '--testSamples', type=int,
        help='dev dataset set aside from the training dataset')
    # NOTE(review): sibling initialize() methods also end with False here;
    # kept for consistency -- confirm this is intentional (True would be usual).
    self.initialized = False
def initialize(self):
    """Add the loss-search stopping threshold to the base options."""
    # experiment specifics
    BaseOptions.initialize(self)
    self.argument_parser.add_argument(
        '--lossDescendThreshold',
        type=float,
        help='tell the search when to stop')
    self.initialized = False
def initialize(self):
    """Register training options: checkpoint resume (date/time), data
    augmentation, optimizer, learning rate, and loss function."""
    # experiment specifics
    BaseOptions.initialize(self)

    def _str2bool(value):
        # FIX: type=bool would turn the string "False" into True;
        # parse boolean spellings explicitly.
        return str(value).lower() in ('true', '1', 'yes', 'y')

    self.argument_parser.add_argument('--mode', type=str, default='train',
                                      help='model mode')
    self.argument_parser.add_argument(
        '--date', type=str,
        help='load a trained model specified by date to continue training')
    self.argument_parser.add_argument(
        '--time', type=str,
        help='load a trained model specified by time to continue training')
    self.argument_parser.add_argument(
        '--autoAugments', type=_str2bool,
        help='activate data auto augment,true or false')
    # FIX: help text now matches the actual choices (momentum/softmax were
    # listed in the help but never accepted).
    self.argument_parser.add_argument(
        '--optimizer', type=str,
        help='choices including adam,sgd',
        choices=['adam', 'sgd'])
    self.argument_parser.add_argument('--learningRate', type=float,
                                      help='learningRate')
    self.argument_parser.add_argument(
        '--lossfunction', type=str,
        help='choices including cross,focalloss',
        choices=['cross', 'focalloss'])
    self.initialized = False
def initialize(self):
    """Extend the base options with test-time settings."""
    BaseOptions.initialize(self)
    add = self.parser.add_argument
    add('--ntest', type=int, default=float("inf"),
        help='# of test examples.')
    add('--results_dir', type=str, default='./results/',
        help='saves results here.')
    add('--aspect_ratio', type=float, default=1.0,
        help='aspect ratio of result images')
    add('--phase', type=str, default='test',
        help='train, val, test, etc')
    add('--which_epoch', type=str, default='latest',
        help='which epoch to load? set to latest to use latest cached model')
    add('--how_many', type=int, default=50,
        help='how many test images to run')
    # this options class configures evaluation, not training
    self.isTrain = False
def initialize(self, parser):
    """Add the on-screen display frequency option and return the parser."""
    BaseOptions.initialize(self, parser)
    parser.add_argument(
        '--display_freq',
        type=int,
        default=200,
        help='frequency of showing training results on screen')
    return parser
def __init__(self, training):
    """Shadow-removal cycle-training options.

    Args:
        training: when True, register the paired shadow / shadow-free
            training directories; otherwise a single test directory.
    """
    BaseOptions.__init__(self)
    add = self.parser.add_argument
    # dataset arguments
    if training:
        # assumes unpaired data loader is used during training
        add('--dirA', type=str, required=True,
            help='Path to training shadow dataset')
        add('--dirB', type=str, required=True,
            help='Path to training shadow free dataset')
    else:
        # assumes single data loader is used during testing
        add('--dir', type=str, required=True,
            help='Path to test shadow dataset')
    # model arguments
    add('--lamA', type=float, default=10.0,
        help='weight for forward cycle loss (A->B->A)')
    add('--lamB', type=float, default=10.0,
        help='weight for backward cycle loss (B->A->B)')
    add('--lambda_ident', type=float, default=0.0,
        help='weight for identity loss')
    add('--ngf', type=int, default=64,
        help='# of filters in first conv. layer of generator')
    add('--ndf', type=int, default=64,
        help='# of filters in first conv. layer of discriminator')
    add('--pool_size', type=int, default=50,
        help='the size of image buffer that stores previously generated images')
    add('--queue_size', type=int, default=100,
        help='the size of mask queue that stores previously generated shadow masks')
def initialize(self, parser):
    """Configure test-mode defaults on top of the base options."""
    BaseOptions.initialize(self, parser)
    parser.add_argument(
        '--which_epoch', type=str, default='latest',
        help='which epoch to load? set to latest to use latest cached model')
    parser.add_argument(
        '--how_many', type=int, default=float("inf"),
        help='how many test images to run')
    # deterministic, un-augmented evaluation driven by the stored options file
    parser.set_defaults(serial_batches=True,
                        no_flip=True,
                        phase='test',
                        load_from_opt_file=True)
    # fall back to CPU when no GPU is present
    if not torch.cuda.is_available():
        parser.set_defaults(gpu_ids="-1")
    self.isTrain = False
    return parser
def initialize(self):
    """Register the learning-rate option for this experiment."""
    # experiment specifics
    BaseOptions.initialize(self)
    self.argument_parser.add_argument('--learningRate',
                                      type=float,
                                      help='learningRate')
    self.initialized = False
def save_new_edges(filename):
    """Build a mesh from *filename* (an .obj path) and dump its edge list
    and vertex coordinates next to it as .edges / .vs text files."""
    opt = BaseOptions()
    opt.initialize()
    opt.num_aug = 1  # no augmentation when exporting raw geometry
    mesh = from_scratch(filename, opt)
    # FIX: removed leftover debug statement `print(dir(mesh))`.
    # edge connectivity, written as integer index pairs
    edges = np.array(mesh.edges)
    edge_file = filename.replace('.obj', '.edges')
    np.savetxt(edge_file, edges, fmt='%d')
    # vertex positions, written as floats
    vs = np.array(mesh.vs)
    v_file = filename.replace('.obj', '.vs')
    np.savetxt(v_file, vs, fmt='%f')
def initialize(self):
    """Register test-mode options identifying which trained model to load."""
    # experiment specifics
    BaseOptions.initialize(self)
    add = self.argument_parser.add_argument
    add('--mode', type=str, default='test', help='model mode')
    add('--date', type=str, help='the date of the trained model')
    add('--time', type=str, help='the time of the trained model')
    self.initialized = False
def initialize(self, parser):
    """Test-time options: fixed data order, output directory, and how
    cloth/person try-on pairs are selected."""
    parser = BaseOptions.initialize(self, parser)
    # evaluation must be deterministic
    parser.set_defaults(no_shuffle=True)
    parser.set_defaults(datamode="test")
    self.is_train = False
    add = parser.add_argument
    add(
        "--result_dir",
        type=str,
        default="test_results",
        help="save test result outputs",
    )
    add(
        "--tryon_list",
        help="Use a CSV file to specify what cloth should go on each person."
        "The CSV should have two columns: CLOTH_PATH and PERSON_ID. "
        "Cloth_path is the path to the image of the cloth product to wear. "
        "Person_id is the identifier that corresponds to the ID under each "
        "annotation folder.",
    )
    add(
        "--random_tryon",
        help="Randomly choose cloth-person pairs for try-on. ",
        action="store_true",
    )
    return parser
def initialize(self, parser):
    """Register test-phase paths for stored features, PCA model, index,
    and the result directory."""
    parser = BaseOptions.initialize(self, parser)
    add = parser.add_argument
    # NOTE(review): these absolute defaults are machine-specific; they are
    # preserved verbatim for compatibility.
    add('--store_feature', type=str,
        default='/home/visiting/Projects/levishery/reconstruction/vangogh_features.json')
    add('--store_pca', type=str,
        default='/home/visiting/Projects/levishery/reconstruction/utils/vangogh.pca')
    add('--store_index', type=str,
        default='/home/visiting/Projects/levishery/reconstruction/vangogh_index.json')
    add('--result_dir', type=str,
        default='/home/visiting/Projects/levishery/reconstruction/result')
    add('--phase', type=str, default='test', help='train, val, test, etc')
    self.isTrain = False
    return parser
def initialize(self, parser):
    """Single-image prediction options: input segmap/photo paths, output
    directory, and test-mode defaults."""
    parser = BaseOptions.initialize(self, parser)
    # define shared options
    parser.add_argument('--segmap_path', required=True,
                        help='path to the segmap label image')
    parser.add_argument('--photo_path', required=True,
                        help='path to the photo image')
    # FIX: argparse ignores `default` when required=True, so the misleading
    # default='./results/cityscapes_predictions' was removed.
    parser.add_argument(
        '--output_dir', type=str, required=True,
        help='Directory the output image will be written to.')
    parser.add_argument('--aspect_ratio', type=float, default=1.0,
                        help='aspect ratio of result images')
    parser.add_argument('--phase', type=str, default='val',
                        help='train, val, test, etc')
    # Dropout and Batchnorm has different behaviour during training and test.
    parser.add_argument('--eval', action='store_true',
                        help='use eval mode during test time.')
    # rewrite devalue values
    parser.set_defaults(model='test')
    # To avoid cropping, the load_size should be the same as crop_size
    parser.set_defaults(load_size=parser.get_default('crop_size'))
    self.isTrain = False
    return parser
def initialize(self, parser):
    """Options for the 16-channel IMU recorder: serial device, recorder
    behaviour, live plot, and detection parameters."""
    parser = BaseOptions.initialize(self, parser)
    add = parser.add_argument
    # ---------- Define Device ---------- #
    add('--n', type=int, default=16)
    add('--port', type=str, default='/dev/cu.usbmodem1413')
    add('--freq', type=int, default=57600)
    add('--repr', type=str, nargs=16, default=[
        'Ax', 'Ay', 'Az', 'Gx', 'Gy', 'Gz', 'Mx', 'My', 'Mz',
        'Q1', 'Q2', 'Q3', 'Q4', 'Y', 'P', 'R'
    ])
    # ---------- Define Recorder ---------- #
    add('--action', type=str, default='stop')
    add('--dataDir', type=str, default='./data', help='models are saved here')
    # ---------- Define Painter ---------- #
    add('--display', type=int, nargs='+', default=list(range(16)))
    add('--memorySize', type=int, default=10)
    add('--ylim', type=int, default=200)
    # ---------- Define Parameters ---------- #
    add('--threshold', type=float, default=40)
    add('--index', type=int, nargs='*', default=[3, 4, 5])
    add('--nStep', type=int, default=2)
    # ---------- Experiment Setting ---------- #
    parser.set_defaults(name='record')
    return parser
def initialize(self, parser):
    """Training options for MCD domain adaptation: network, source/target
    datasets, optimizer, and loss configuration."""
    parser = BaseOptions.initialize(self, parser)
    add = parser.add_argument
    # ---------- Define Mode ---------- #
    parser.set_defaults(mode='train')
    # ---------- Define Network ---------- #
    parser.set_defaults(model='mcd')
    add('--net', type=str, default="drn_d_38", help="network structure",
        choices=['fcn', 'psp', 'segnet', 'fcnvgg',
                 "drn_c_26", "drn_c_42", "drn_c_58",
                 "drn_d_22", "drn_d_38", "drn_d_54", "drn_d_105"])
    add('--res', type=str, default='50', metavar="ResnetLayerNum",
        choices=["18", "34", "50", "101", "152"],
        help='which resnet 18,50,101,152')
    # ---------- Define Dataset ---------- #
    add('--sourceDataset', type=str, nargs='+',
        choices=["gta_train", "gta_val", "city_train", "city_val"],
        default=["gta_train"])
    add('--targetDataset', type=str, nargs='+',
        choices=["gta_train", "gta_val", "city_train", "city_val"],
        default=["city_train"])
    # ---------- Optimizers ---------- #
    parser.set_defaults(opt='sgd')
    # ---------- Train Details ---------- #
    add('--k', type=int, default=4,
        help='how many steps to repeat the generator update')
    add("--nTimesDLoss", type=int, default=1)
    add("--bgLoss", action="store_true",
        help='whether you add background loss')
    add('--dLoss', type=str, default="diff",
        choices=['mysymkl', 'symkl', 'diff'],
        help="choose from ['mysymkl', 'symkl', 'diff']")
    # ---------- Hyperparameters ---------- #
    parser.set_defaults(epoch=1)
    parser.set_defaults(nEpochStart=10)
    parser.set_defaults(nEpochDecay=10)
    # ---------- Experiment Setting ---------- #
    parser.set_defaults(name='mcd_da')
    parser.set_defaults(displayWidth=3)
    return parser
def initialize(self):
    """Test options for image-retrieval evaluation on the extended CMU set."""
    BaseOptions.initialize(self)
    self.isTrain = False
    # FIX: argparse ignores `default` when required=True, so the misleading
    # default=0 was removed.
    self.parser.add_argument('--which_epoch', type=int, required=True,
                             help='which epoch to load for testing')
    self.parser.add_argument('--batch_size', type=int, default=1,
                             help='input batch size')
    self.parser.add_argument('--data_root', type=str,
                             default='./datasets/extended_CMU/',
                             help='path to CMU images')
    self.parser.add_argument('--retrieval_metric', type=str, default='L1',
                             help='metric used for retrieval, L2 || cos || L1')
    self.parser.add_argument(
        '--slice_list', nargs='+',
        default=[2, 3, 4, 5, 6, 13, 14, 15, 16, 17, 18, 19, 20, 21],
        type=int, help='which slice to test')
    self.parser.add_argument(
        '--testlayers_w', nargs='+', default=[0, 0, 0, 0, 1, 1, 0, 0],
        type=float,
        help='the weight of image representation on each scale for retrieval')
    self.parser.add_argument(
        '--trip_layer_index', nargs='+', default=[5, 6], type=int,
        help='which layers are used for image retrieval, counting from 1')
def __init__(self, training):
    """GAN options: dataset path, generator/discriminator widths, and the
    training schedule (init epochs, decay, VGG feature layer)."""
    BaseOptions.__init__(self)
    add = self.parser.add_argument
    # dataset arguments
    add('--dir', type=str, required=True, help='Path to test dataset')
    # generator arguments
    add('--ngf', type=int, default=64,
        help='# of filters in first conv. layer of generator')
    # discriminator arguments
    add('--ndf', type=int, default=64,
        help='# of filters in first conv. layer of discriminator')
    # training arguments
    add('--num_epochs_init', type=int, default=100,
        help='Number of epochs to train generator for initialization')
    add('--num_epochs', type=int, default=2000,
        help='Number of epochs to train for')
    add('--lr_decay', type=float, default=0.1, help='Decay learning rate')
    add('--decay_every', type=int, default=1000,
        help='When to decay learning rate')
    add('--vgg_choose', type=str, default='block5_conv4',
        help='Choose layer of VGG')
def initialize(self, parser):
    """Training configuration: dataset loading, model/runtime flags, and
    output locations."""
    parser = BaseOptions.initialize(self, parser)

    def _str2bool(value):
        # FIX: argparse's type=bool maps any non-empty string (including
        # "False") to True; parse boolean spellings explicitly instead.
        return str(value).lower() in ('true', '1', 'yes', 'y')

    parser.add_argument('--name', type=str, help='network cfig name')
    parser.add_argument('--train', type=_str2bool, default=True,
                        help='train or eval')
    # Datasets
    parser.add_argument('--workers', type=int, default=4,
                        help='number of workers')
    parser.add_argument('--batch_size', type=int, default=4,
                        help='input batch size')
    parser.add_argument('--shuffle', type=_str2bool, default=True,
                        help='if shuffle the dataset')
    parser.add_argument('--image_size', type=int, default=256,
                        help='image size')
    parser.add_argument('--data_root', type=str,
                        default='./data/dataset/Synthetic', help='data root')
    # Model
    parser.add_argument('--cuda', type=_str2bool, default=True,
                        help='use cuda')
    # NOTE(review): list defaults with a scalar `type` and no nargs -- a value
    # supplied on the command line would parse as a single int, not a list.
    # Kept as-is to avoid changing the interface; confirm intended usage.
    parser.add_argument('--nepoch', type=int, default=[14, 10, 9],
                        help='number of total epochs')
    parser.add_argument('--reuse', type=_str2bool, default=False,
                        help='if reuse model')
    parser.add_argument('--gpu_id', type=int, default=[0, 1],
                        help='gpu id for usage')
    parser.add_argument('--start_epoch', type=int, default=1,
                        help='the number of epoch to start')
    # Visualization and saving
    parser.add_argument('--outf', type=str, default='./data',
                        help='folder to output temp samples')
    return parser
def __init__(self, n, device=torch.device('cpu')):
    """Initialize an n x n sparse identity grouping matrix on the device
    picked from the configured GPU ids.

    NOTE(review): the `device` parameter is accepted but not used -- the
    device is derived from BaseOptions().get_device() instead; kept for
    interface compatibility.
    """
    gpu_ids = BaseOptions().get_device()
    if len(gpu_ids) > 0:
        self.device = torch.device('cuda:{}'.format(gpu_ids[0]))
    else:
        self.device = torch.device('cpu')
    self.__size = n
    # default feature-rebuild strategy
    self.rebuild_features = self.rebuild_features_average
    self.values = torch.ones(n, dtype=torch.float)
    # diagonal (i, i) indices for a sparse identity matrix
    diag_idx = torch.stack((torch.arange(n), torch.arange(n)), dim=0)
    self.groups = torch.sparse_coo_tensor(indices=diag_idx,
                                          values=self.values,
                                          size=(self.__size, self.__size),
                                          device=self.device)
def make_soft_eseg(obj_path, eseg_path, seseg_path, nclasses=4):
    """For every .obj mesh in *obj_path*, convert its hard edge labels
    (.eseg files in *eseg_path*) into per-edge soft class distributions
    and write them to *seseg_path* as .seseg text files."""
    if not os.path.isdir(seseg_path):
        os.makedirs(seseg_path)
    obj_files = glob.glob(os.path.join(obj_path, '*.obj'))
    opt = BaseOptions()
    opt.initialize()
    opt.num_aug = 1  # no augmentation when generating labels
    for obj_file in obj_files:
        mesh = from_scratch(obj_file, opt)
        gemm_edges = np.array(mesh.gemm_edges)
        obj_id = os.path.splitext(os.path.basename(obj_file))[0]
        seg_file = os.path.join(eseg_path, obj_id + '.eseg')
        # labels on disk are 1-based; shift to 0-based class indices
        edge_seg = np.array(read_seg(seg_file).squeeze(), dtype='int32') - 1
        soft_labels = -1 * np.ones((mesh.edges_count, nclasses),
                                   dtype='float64')
        for ei in range(mesh.edges_count):
            prob = np.zeros(nclasses)
            seg_ids, counts = np.unique(edge_seg[gemm_edges[ei]],
                                        return_counts=True)
            # fraction of neighbouring edges carrying each class label
            prob[seg_ids] = counts / float(len(gemm_edges[ei]))
            soft_labels[ei, :] = prob
        out_file = os.path.join(seseg_path, obj_id + '.seseg')
        np.savetxt(out_file, soft_labels, fmt='%f')
def main():
    """Compare FLOPs/params of the original vs pruned model and report the
    pruned model's top-1 accuracy on the test set."""
    # get options
    opt = BaseOptions().parse()

    # basic settings
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]
    if torch.cuda.is_available():
        device = 'cuda'
        torch.backends.cudnn.benchmark = True
    else:
        device = 'cpu'

    # ---- dataloader and a sample input for profiling ----
    _, dataloader_test = custom_get_dataloaders(opt)
    if hasattr(dataloader_test, 'dataset'):
        # use the first dataset item as the profiling input
        sample = dataloader_test.dataset.__getitem__(0)
        dummy_input = sample[0].unsqueeze(0)
    else:
        # imagenet dali loader exposes no dataset; fall back to a fake image
        dummy_input = torch.rand(1, 3, 224, 224)

    # ---- evaluate baseline model ----
    net = ModelWrapper(opt)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    flops_before, params_before = model_summary(net.get_compress_part(),
                                                dummy_input)
    del net

    # ---- evaluate pruned model ----
    net = ModelWrapper(opt)
    net.load_checkpoint(opt.pruned_model)
    net = net.to(device)
    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    net.parallel(opt.gpu_ids)
    acc_after = net.get_eval_scores(dataloader_test)

    # ---- report ----
    print('######### Report #########')
    print('Model:{}'.format(opt.model_name))
    print('Checkpoint:{}'.format(opt.pruned_model))
    print('FLOPs of Original Model:{:.3f}G;Params of Original Model:{:.2f}M'.
          format(flops_before / 1e9, params_before / 1e6))
    print('FLOPs of Pruned Model:{:.3f}G;Params of Pruned Model:{:.2f}M'.
          format(flops_after / 1e9, params_after / 1e6))
    print('Top-1 Acc of Pruned Model on {}:{}'.format(opt.dataset_name,
                                                      acc_after['accuracy']))
    print('##########################')
def initialize(self, parser):
    """Test-time options for the pretrained COCO model, with deterministic
    CPU defaults."""
    BaseOptions.initialize(self, parser)
    parser.add_argument(
        '--which_epoch', type=str, default='latest',
        help='which epoch to load? set to latest to use latest cached model')
    parser.add_argument(
        '--how_many', type=int, default=float("inf"),
        help='how many test images to run')
    # fixed preprocessing geometry for evaluation
    parser.set_defaults(preprocess_mode='scale_width_and_crop',
                        crop_size=256,
                        load_size=256,
                        display_winsize=256)
    # deterministic, un-augmented test pass on CPU
    parser.set_defaults(serial_batches=True)
    parser.set_defaults(no_flip=True)
    parser.set_defaults(phase='test')
    parser.set_defaults(gpu_ids='-1')
    parser.set_defaults(name='coco_pretrained')
    self.isTrain = False
    return parser
def initialize(self, parser):
    """Test options for the compressed (sub-mobile) generator: result
    locations, metrics to skip, and latency profiling."""
    parser = BaseOptions.initialize(self, parser)
    # define shared options
    parser.add_argument('--results_dir', type=str, default=None,
                        required=True, help='saves results here.')
    parser.add_argument('--need_profile', action='store_true')
    parser.add_argument('--num_test', type=int, default=float('inf'),
                        help='how many test images to run')
    parser.add_argument('--model', type=str, default='test',
                        help='which model do you want test')
    parser.add_argument('--netG', type=str,
                        default='sub_mobile_resnet_9blocks',
                        help='specify the generator architecture')
    parser.add_argument(
        '--ngf', type=int, default=64,
        help='the base number of filters of the student generator')
    parser.add_argument('--dropout_rate', type=float, default=0,
                        help='the dropout rate of the generator')
    # rewrite devalue values
    parser.add_argument('--no_fid', action='store_true')
    # FIX: the original passed required=None, which is not a valid value for
    # `required` (it only worked by being falsy); the argument is optional.
    parser.add_argument(
        '--real_stat_path', type=str,
        help='the path to load the groud-truth images information to compute FID.')
    parser.add_argument('--no_mIoU', action='store_true')
    parser.add_argument(
        '--times', type=int, default=100,
        help='times of forwarding the data to test the latency')
    parser.set_defaults(phase='val',
                        serial_batches=True,
                        no_flip=True,
                        load_size=parser.get_default('crop_size'),
                        batch_size=1)
    return parser
def initialize(self):
    """Training options: logging frequency, schedule, optimizer settings,
    multi-scale discriminator shape, and loss weights."""
    BaseOptions.initialize(self)
    add = self.parser.add_argument
    ### for display ###
    add('--save_epoch_freq', type=int, default=10)
    add('--print_iter_freq', type=int, default=100)
    ### for training ###
    add('--epoch', type=int, default=100)
    add('--epoch_decay', type=int, default=100,
        help='# of iter to linearly decay learning rate to zero')
    add('--lr_policy', type=str, default='lambda')
    add('--beta1', type=float, default=0.5)
    add('--lr', type=float, default=0.0002)
    add('--weight_decay', type=float, default=0.0001)
    ### for discriminator ###
    add('--num_D', type=int, default=2,
        help='number of discriminators to use')
    add('--n_layer', type=int, default=3,
        help='only used if which_model_netD==n_layers')
    add('--ndf', type=int, default=64)
    ### for losses ###
    add('--lambda_vgg', type=float, default=10.0)
    add('--lambda_rec', type=float, default=10.0)
    self.isTrain = True
def initialize(self, parser: argparse.ArgumentParser):
    """Training options: data shuffling, checkpoint cadence, validation
    interval, and the learning-rate schedule."""
    parser = BaseOptions.initialize(self, parser)
    add = parser.add_argument
    # data
    add("--no_shuffle", action="store_true",
        help="don't shuffle input data")
    # checkpoints
    add("--save_count",
        type=int,
        help="how often in steps to always save a checkpoint",
        default=10000)
    add("--val_check_interval",
        "--val_frequency",
        dest="val_check_interval",
        type=str,
        default="0.125",  # parsed later into int or float based on "."
        help="If float, validate (and checkpoint) after this many epochs. "
        "If int, validate after this many batches. If 0 or 0.0, validate "
        "every step.")
    # optimization
    add("--lr", type=float, default=1e-4,
        help="initial learning rate for adam")
    add("--keep_epochs",
        type=int,
        help="number of epochs with initial learning rate",
        default=5)
    add("--decay_epochs",
        type=int,
        help="number of epochs to linearly decay the learning rate",
        default=5)
    add("--accumulated_batches",
        type=int,
        help="number of batch gradients to accumulate before calling optimizer.step()",
        default=1)
    self.is_train = True
    return parser
def initialize(self, parser):
    """Test options for domain-adaptation evaluation: network choice and
    the dataset split to evaluate on."""
    parser = BaseOptions.initialize(self, parser)
    add = parser.add_argument
    # ---------- Define Mode ---------- #
    parser.set_defaults(mode='test')
    # ---------- Define Network ---------- #
    parser.set_defaults(model='test')
    add('--net', type=str, default="drn_d_38", help="network structure",
        choices=['fcn', 'psp', 'segnet', 'fcnvgg',
                 "drn_c_26", "drn_c_42", "drn_c_58",
                 "drn_d_22", "drn_d_38", "drn_d_54", "drn_d_105"])
    add('--res', type=str, default='50', metavar="ResnetLayerNum",
        choices=["18", "34", "50", "101", "152"],
        help='which resnet 18,50,101,152')
    # ---------- Define Dataset ---------- #
    add('--dataset', type=str, nargs='+',
        choices=["gta_train", "gta_val", "city_train", "city_val"],
        default=["city_val"])
    # ---------- Experiment Setting ---------- #
    parser.set_defaults(name='mcd_da')
    parser.set_defaults(displayWidth=3)
    return parser
def initialize(self, parser):
    """Training options: data loading, optimizer and LR schedule,
    hyperparameters, and logging/checkpoint intervals."""
    parser = BaseOptions.initialize(self, parser)
    add = parser.add_argument
    # ---------- Define Mode ---------- #
    parser.set_defaults(mode='train')
    # ---------- Define Dataset ---------- #
    add('--nThreads', default=4, type=int,
        help='# threads for loading data')
    add('--nInput', default=3, type=int,
        help='# threads for loading data')
    # ---------- Optimizers ---------- #
    add('--opt', type=str, choices=['sgd', 'adam'], default='adam',
        help="network optimizer")
    add('--lr', type=float, default=1e-3,
        help='learning rate (default: 0.001)')
    add('--beta1', type=float, default=0.5, help='momentum term of adam')
    add('--momentum', type=float, default=0.9,
        help='momentum sgd (default: 0.9)')
    add('--weight_decay', type=float, default=2e-5,
        help='weight_decay (default: 2e-5)')
    add("--adjustLr", action="store_true", help='whether you change lr')
    add('--lr_policy', type=str, default='lambda',
        help='learning rate policy: lambda|step|plateau')
    add('--lr_decay_iters', type=int, default=50,
        help='multiply by a gamma every lr_decay_iters iterations')
    # ---------- Hyperparameters ---------- #
    add('--batchSize', type=int, default=2, help="batch_size")
    add('--epoch', type=int, default=1, help='the training epoch.')
    add('--nEpochStart', type=int, default=100,
        help='# of epoch at starting learning rate')
    add('--nEpochDecay', type=int, default=100,
        help='# of epoch to linearly decay learning rate to zero')
    # ---------- Experiment Setting ---------- #
    parser.set_defaults(name='train')
    add('--displayInterval', type=int, default=5,
        help='frequency of showing training results on screen')
    add('--saveLatestInterval', type=int, default=5000,
        help='frequency of saving the latest results')
    add('--saveEpochInterval', type=int, default=5,
        help='frequency of saving checkpoints at the end of epochs')
    return parser
def initialize(self, parser):
    """Options for the live 16-channel IMU viewer: serial device and the
    on-screen plot configuration."""
    parser = BaseOptions.initialize(self, parser)
    add = parser.add_argument
    # ---------- Define Device ---------- #
    add('--n', type=int, default=16)
    add('--port', type=str, default='/dev/cu.usbmodem1413')
    add('--freq', type=int, default=115200)
    add('--repr', type=str, nargs=16, default=[
        'Ax', 'Ay', 'Az', 'Gx', 'Gy', 'Gz', 'Mx', 'My', 'Mz',
        'Q1', 'Q2', 'Q3', 'Q4', 'Y', 'P', 'R'
    ])
    # ---------- Define Painter ---------- #
    add('--display', type=int, nargs='+', default=list(range(16)))
    add('--memorySize', type=int, default=10)
    add('--ylim', type=int, default=200)
    # ---------- Experiment Setting ---------- #
    parser.set_defaults(name='main')
    return parser
def initialize(self):
    """Training options: display/saving frequencies, resume flags, loss
    selection, and optimizer hyperparameters."""
    BaseOptions.initialize(self)
    add = self.parser.add_argument
    # for displays
    add('--display_freq', type=int, default=100,
        help='frequency of showing training results on screen')
    add('--print_freq', type=int, default=100,
        help='frequency of showing training results on console')
    add('--save_latest_freq', type=int, default=1000,
        help='frequency of saving the latest results')
    add('--save_epoch_freq', type=int, default=10,
        help='frequency of saving checkpoints at the end of epochs')
    add('--no_html', action='store_true',
        help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
    add('--debug', action='store_true',
        help='only do one epoch and displays at each iteration')
    # for training
    add('--loadfroms', action='store_true',
        help='continue training: load from 32s or 16s')
    add('--continue_train', action='store_true',
        help='continue training: load the latest model')
    add('--use_softmax', action='store_true',
        help='if specified use softmax loss, otherwise log-softmax')
    add('--phase', type=str, default='train',
        help='train, val, test, etc')
    add('--nepochs', type=int, default=100,
        help='# of iter at starting learning rate')
    add('--iterSize', type=int, default=10,
        help='# of iter at starting learning rate')
    add('--maxbatchsize', type=int, default=-1,
        help='# of iter at starting learning rate')
    add('--warmup_iters', type=int, default=500,
        help='# of iter at starting learning rate')
    add('--beta1', type=float, default=0.5,
        help='momentum term of adam')
    add('--lr', type=float, default=0.00025,
        help='initial learning rate for adam')
    add('--lr_power', type=float, default=0.9,
        help='power of learning rate policy')
    add('--momentum', type=float, default=0.9,
        help='momentum for sgd')
    add('--wd', type=float, default=0.0004,
        help='weight decay for sgd')
    self.isTrain = True
def initialize(self, parser):
    """Options for evolutionary channel search over a super-network:
    evaluation metrics, search budget, population parameters, and
    checkpoint/restore behaviour."""
    parser = BaseOptions.initialize(self, parser)
    # define shared options
    assert isinstance(parser, argparse.ArgumentParser)
    add = parser.add_argument
    add('--output_dir', type=str, default=None, required=True,
        help='the path to save the evaluation result.')
    add('--num_test', type=int, default=float('inf'),
        help='how many test images to run')
    add('--model', type=str, default='test',
        help='which model do you want test')
    add('--no_fid', action='store_true',
        help='whether you want to compute FID.')
    add('--no_mIoU', action='store_true',
        help='whether you want to compute mIoU.')
    add('--netG', type=str, default='super_mobile_resnet_9blocks',
        choices=['super_mobile_resnet_9blocks', 'super_mobile_spade'],
        help='specify generator architecture')
    add('--ngf', type=int, default=48,
        help='the base number of filters of the student generator')
    add('--dropout_rate', type=float, default=0,
        help='the dropout rate of the generator')
    add('--budget', type=float, default=1e18, help='the MAC budget')
    add('--real_stat_path', type=str, default=None,
        help='the path to load the ground-truth images information to compute FID.')
    add('--max_cache_size', type=int, default=10000000,
        help='the cache size to store the results')
    add('--population_size', type=int, default=100)
    add('--mutate_prob', type=float, default=0.2,
        help='the probability of mutation')
    add('--mutation_ratio', type=float, default=0.5,
        help='the ratio of networks that are generated through mutation in generation n >= 2.')
    add('--parent_ratio', type=float, default=0.25,
        help='the ratio of networks that are used as parents for next generation')
    add('--evolution_iters', type=int, default=500,
        help='how many generations of population to be searched')
    add('--criterion', type=str, default='fid',
        help='the criterion for the performance',
        choices=['fid', 'mIoU', 'accu'])
    add('--weighted_sample', type=float, default=1,
        help='number of times of the probability of the smallest channel to '
        'that of the largest channel in a single layer. '
        '(only affect the first generation)')
    add('--generation_base', type=int, default=1,
        help='the generation base of the evolution (used for resuming)')
    add('--restore_pkl_path', type=str, default=None,
        help='the checkpoint to restore searching')
    add('--only_restore_cache', action='store_true',
        help='whether to only restore caches in the pkl file')
    add('--save_freq', type=int, default=60,
        help='the number of minutes to save the latest searching results')
    # rewrite devalue values
    parser.set_defaults(phase='val',
                        serial_batches=True,
                        no_flip=True,
                        load_size=parser.get_default('crop_size'),
                        load_in_memory=True)
    return parser