def gather_options(self):
    """Initialize the parser with basic options (only once), then add
    model-specific and dataset-specific options on top of them.

    Returns:
        argparse.Namespace: the fully parsed command-line options.
    """
    if not self.initialized:
        # initialize parser with basic options
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)
    else:
        # FIX: reuse the previously built parser. The original bound
        # `parser` only inside the branch above, so a second call
        # (self.initialized == True) raised NameError here.
        parser = self.parser

    # get the basic options
    opt, _ = parser.parse_known_args()

    # modify model-related parser options
    model_name = opt.model
    model_option_setter = models.get_option_setter(model_name)
    parser = model_option_setter(parser, self.isTrain)
    opt, _ = parser.parse_known_args()  # parse again with the new defaults

    # modify dataset-related parser options
    dataset_name = opt.dataset_mode
    dataset_option_setter = data.get_option_setter(dataset_name)
    parser = dataset_option_setter(parser, self.isTrain)

    self.parser = parser
    return parser.parse_args()
def gather_options(self):
    """Initialize our parser with basic options (only once).
    Add additional model-specific and dataset-specific options.
    These options are defined in the <modify_commandline_options> function
    in model and dataset classes.

    Returns:
        argparse.Namespace: the fully parsed command-line options.
    """
    if not self.initialized:
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)
    else:
        # FIX: reuse the stored parser on repeated calls. The original
        # bound `parser` only inside the branch above, so calling this
        # method again (self.initialized == True) raised NameError.
        parser = self.parser

    # get the basic options
    opt, _ = parser.parse_known_args()

    # let the chosen model extend the parser, then re-parse so its new
    # defaults are visible when selecting the dataset
    model_name = opt.model
    model_option_setter = models.get_option_setter(model_name)
    parser = model_option_setter(parser, self.isTrain)
    opt, _ = parser.parse_known_args()

    # let the chosen dataset extend the parser
    dataset_name = opt.dataset_mode
    dataset_option_setter = data.get_option_setter(dataset_name)
    parser = dataset_option_setter(parser, self.isTrain)

    self.parser = parser
    return parser.parse_args()
def gather_options(self):
    """Initialize the parser with basic options (only once), then add
    model-specific and dataset-specific options on top of them.

    Returns:
        argparse.Namespace: the fully parsed command-line options.
    """
    if not self.initialized:
        # initialize parser with basic options
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)
    else:
        # FIX: reuse the previously built parser. The original bound
        # `parser` only inside the branch above, so a second call
        # (self.initialized == True) raised NameError here.
        parser = self.parser

    # get the basic options
    opt, _ = parser.parse_known_args()

    # modify model-related parser options
    model_name = opt.model
    model_option_setter = models.get_option_setter(model_name)
    parser = model_option_setter(parser, self.isTrain)
    opt, _ = parser.parse_known_args()  # parse again with the new defaults

    # modify dataset-related parser options
    dataset_name = opt.dataset_mode
    dataset_option_setter = data.get_option_setter(dataset_name)
    parser = dataset_option_setter(parser, self.isTrain)

    self.parser = parser
    return parser.parse_args()
if not self.initialized: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser = self.initialize(parser) # get the basic options opt, unknown = parser.parse_known_args() # modify model-related parser options model_name = opt.model model_option_setter = models.get_option_setter(model_name) parser = model_option_setter(parser, self.isTrain) # modify dataset-related parser options dataset_mode = opt.dataset_mode dataset_option_setter = data.get_option_setter(dataset_mode) parser = dataset_option_setter(parser, self.isTrain) opt, unknown = parser.parse_known_args() # if there is opt_file, load it. # The previous default options will be overwritten if opt.load_from_opt_file: parser = self.update_options_from_file(parser, opt) opt = parser.parse_args() self.parser = parser return opt def print_options(self, opt): message = ''
def gather_options(self):
    """Initialize our parser with basic options(only once).
    Add additional model-specific and dataset-specific options.
    These options are defined in the <modify_commandline_options> function
    in model and dataset classes.
    """
    if not self.initialized:  # check if it has been initialized
        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)

    # get the basic options
    opt, _ = parser.parse_known_args()

    # Phase-specific options: dynamically look up a function named
    # "phase_<phase>_options" in options.phase_specific_options and let it
    # extend the parser for the current phase.
    method_name = "phase_" + opt.phase + "_options"
    module = importlib.import_module('options.phase_specific_options')
    method = getattr(module, method_name)
    parser = method(parser)
    opt, _ = parser.parse_known_args()
    # Training mode is derived from the phase name, not from a flag.
    self.isTrain = opt.phase == 'train'

    # Options previously saved to file; presumably an argv-style list of
    # "--key=value" strings (judging by earlier debug code that split on
    # '=' after stripping '--') — TODO confirm against
    # get_all_specific_options_from_file. isLoading reports whether such
    # a file was found.
    dict_options, isLoading = self.get_all_specific_options_from_file(opt)
    parser = self.load_base_option(parser)
    # Parse the stored options first to discover which model/dataset the
    # saved experiment used, so the right option setters are applied below.
    opt_spec, _ = parser.parse_known_args(dict_options)

    # modify manager-, model- and dataset-related parser options
    managers_option_setter = managers.get_option_setter(opt_spec.model)
    parser = managers_option_setter(parser, self.isTrain)
    model_option_setter = models.get_option_setter(opt_spec.model)
    parser = model_option_setter(parser, self.isTrain)
    dataset_option_setter = data.get_option_setter(opt_spec.dataset)
    parser = dataset_option_setter(parser, self.isTrain)

    self.parser = parser
    options = parser.parse_args()

    if isLoading:
        # Re-parse the stored options with the fully extended parser and
        # overwrite any differing command-line value with the file value,
        # announcing each override on stdout.
        options2 = parser.parse_known_args(dict_options)
        for kk, vv in vars(options2[0]).items():
            if hasattr(options, kk) and getattr(options, kk) != vv:
                old = getattr(options, kk)
                setattr(options, kk, vv)
                print('updating option: {} from {} to {}'.format(kk,old, vv))
    return options
def gather_options(self):
    """Initialize our parser with basic options(only once).
    Add additional model-specific and dataset-specific options.
    These options are defined in the <modify_commandline_options> function
    in model and dataset classes.
    """
    if not self.initialized:  # check if it has been initialized
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser = self.initialize(parser)

    # get the basic options
    opt, _ = parser.parse_known_args()

    # modify model-related parser options
    model_name = opt.model
    model_option_setter = models.get_option_setter(model_name)
    parser = model_option_setter(parser, self.isTrain)
    opt, _ = parser.parse_known_args()  # parse again with new defaults

    # modify dataset-related parser options
    dataset_name = opt.dataset_mode
    dataset_option_setter = data.get_option_setter(dataset_name)
    parser = dataset_option_setter(parser, self.isTrain)

    # save and return the parser
    self.parser = parser
    output_opt, _ = parser.parse_known_args()

    # determine alphabet and dataroot according to dataname:
    output_opt.dataroot = dataset_catalog.datasets[output_opt.dataname]
    if self.isTrain:
        if output_opt.unlabeled_dataname is not None:
            output_opt.unlabeled_dataroot = dataset_catalog.datasets[
                output_opt.unlabeled_dataname]
    alphabet_dict = dataset_catalog.alphabet_dict
    lex_dict = dataset_catalog.lex_dict
    # Pick the alphabet (and, if none was given, the lexicon) of the first
    # catalog entry whose name is a substring of the chosen dataname.
    # NOTE(review): nesting of the lex if/else under the dataname match is
    # reconstructed from token order — confirm against the original repo.
    for name in alphabet_dict.keys():
        if name in opt.dataname:
            output_opt.alphabet = getattr(alphabets, alphabet_dict[name])
            if output_opt.lex == '':
                # default lexicon for this dataset: no suffix in the name
                output_opt.lex = lex_dict[name]
                lex_str = ''
            else:
                # custom lexicon: encode its file stem into the run name
                lex_str = '_lex_' + os.path.splitext(
                    output_opt.lex.split('/')[-1])[0]

    # save to the disk
    if output_opt.name == '':
        # Auto-build the experiment name from:
        #  1. dataset name (and 2. lexicon, if non-default)
        #  3. resolution and 4. batch size
        # plus, only when changed from their defaults:
        #  5. dim_z; 6. no hierarchical Z; 7. one_hot_k; 8. custom OCR
        #  structure; 9. '_useRNN' when an RNN is used; 10. noGB or a
        #  GB alpha != 1; 11. semi-supervised parameters; 12. single-writer
        #  parameters; 13. not optimizing G; 14./15. reconstruction-loss
        #  variants (OCR-only loss).
        output_opt.name += output_opt.name_prefix + '_' + output_opt.dataname + lex_str + output_opt.capitalize * '_CapitalizeLex' + '_GANres%s' % output_opt.resolution + '_bs%s' % output_opt.batch_size
        if output_opt.dim_z != 128:
            output_opt.name += '_dimZ%s' % output_opt.dim_z
        if output_opt.no_hier:
            output_opt.name += '_noHier'
        if output_opt.one_hot_k > 1:
            output_opt.name += '_oneHot%s' % output_opt.one_hot_k
        if output_opt.use_rnn:
            output_opt.name += '_useRNN'
        if self.isTrain:
            if output_opt.no_grad_balance:
                output_opt.name += '_noGB'
            elif output_opt.gb_alpha != 1:
                output_opt.name += '_GB%s' % output_opt.gb_alpha
            if output_opt.unlabeled_dataname is not None:
                output_opt.name += '_SemiSupervised_' + output_opt.unlabeled_dataname
                if output_opt.disjoint:
                    output_opt.name += '_disjoint'
            if output_opt.single_writer:
                output_opt.name += '_SingleWriter'
                if output_opt.optimize_z:
                    output_opt.name += 'OptimizeZ'
            if output_opt.not_optimize_G:
                output_opt.name += '_NotOptimizeG'
            if output_opt.onlyOCR:
                output_opt.name += '_OnlyOCRLoss'
    # vocabulary size follows directly from the selected alphabet
    output_opt.len_vocab = len(output_opt.alphabet)
    return output_opt
# Script-level option setup for gathering statistics; `parser`, `data` and
# `warnings` are defined/imported elsewhere in this file.
parser.add_argument('--dataset_mode', type=str, default='aligned',
                    choices=['aligned', 'single', 'cityscapes', 'coco'],
                    help='chooses how datasets are loaded.')
parser.add_argument('--direction', type=str, default='AtoB',
                    help='AtoB or BtoA')
parser.add_argument('--load_size', type=int, default=256,
                    help='scale images to this size')
parser.add_argument('--crop_size', type=int, default=256,
                    help='then crop to this size')
parser.add_argument('--preprocess', type=str, default='none',
                    help='scaling and cropping of images at load time '
                         '[resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
parser.add_argument('--phase', type=str, default='val',
                    help='train, val, test, etc')
parser.add_argument('--output_path', type=str, required=True,
                    help='the path to save the statistical information.')
parser.add_argument('--gpu_ids', type=str, default='0',
                    help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')

# Let the selected dataset extend the parser before the final parse;
# isTrain=False because this script only evaluates.
opt, _ = parser.parse_known_args()
dataset_name = opt.dataset_mode
dataset_option_setter = data.get_option_setter(dataset_name)
parser = dataset_option_setter(parser, False)
opt = parser.parse_args()

# Hard-code loader settings for deterministic single-pass evaluation:
# single-threaded, batch size 1, no shuffling, no augmentation.
opt.num_threads = 0
opt.batch_size = 1
opt.serial_batches = True
opt.no_flip = True
opt.max_dataset_size = -1   # -1 presumably means "no limit" — TODO confirm
opt.load_in_memory = False
opt.isTrain = False

# [single] datasets only provide one side, so force the supported direction.
if opt.dataset_mode == 'single' and opt.direction == 'AtoB':
    warnings.warn('Dataset mode [single] only supports direction BtoA. '
                  'We will change the direction to BtoA.!')
    opt.direction = 'BtoA'