def test_pix2pix_fid(model, opt):
    opt.phase = 'val'
    opt.num_threads = 0
    opt.batch_size = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.load_size = 256
    opt.display_id = -1
    dataset = create_dataset(opt)
    model.model_eval()

    result_dir = os.path.join(opt.checkpoints_dir, opt.name, 'test_results')
    util.mkdirs(result_dir)

    fake_B = {}
    for i, data in enumerate(dataset):
        model.set_input(data)
        with torch.no_grad():
            model.forward()
        visuals = model.get_current_visuals()
        fake_B[data['A_paths'][0]] = visuals['fake_B']
        util.save_images(visuals, model.image_paths, result_dir,
                         direction=opt.direction,
                         aspect_ratio=opt.aspect_ratio)

    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    inception_model = InceptionV3([block_idx])
    inception_model.to(model.device)
    inception_model.eval()

    npz = np.load(os.path.join(opt.dataroot, 'real_stat_B.npz'))
    fid = get_fid(list(fake_B.values()), inception_model, npz,
                  model.device, opt.batch_size)
    return fid

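# Hypothetical call site (not from the original code): run the FID evaluation
# on a trained model. `create_model` is an assumed factory mirroring
# create_dataset(opt); only test_pix2pix_fid itself is original.
model = create_model(opt)  # assumption: builds the pix2pix wrapper used above
fid = test_pix2pix_fid(model, opt)
print('FID = %.2f (lower is better)' % fid)
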
def __init__(self, opt, save_dir, filename='loss_log.txt'):
    self.display_id = opt.display_id
    self.use_html = not opt.no_html
    self.win_size = opt.display_winsize
    self.save_dir = save_dir
    self.name = os.path.basename(self.save_dir)
    self.saved = False
    self.display_single_pane_ncols = opt.display_single_pane_ncols

    # Error plots
    self.error_plots = dict()
    self.error_wins = dict()

    if self.display_id > 0:
        import visdom
        self.vis = visdom.Visdom(port=opt.display_port)

    if self.use_html:
        self.web_dir = os.path.join(self.save_dir, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        print('create web directory %s...' % self.web_dir)
        util.mkdirs([self.web_dir, self.img_dir])

    self.log_name = os.path.join(self.save_dir, filename)
    with open(self.log_name, "a") as log_file:
        now = time.strftime("%c")
        log_file.write('================ Training Loss (%s) ================\n' % now)

def parse(self, save=True):
    if not self.initialized:
        self.initialize()
    self.opt = self.parser.parse_args()
    self.opt.isTrain = self.isTrain  # train or test

    str_ids = self.opt.gpu_ids.split(',')
    self.opt.gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)
        if id >= 0:
            self.opt.gpu_ids.append(id)

    # # set gpu ids
    # if len(self.opt.gpu_ids) > 0:
    #     torch.cuda.set_device(self.opt.gpu_ids[0])

    args = vars(self.opt)
    print('------------ Options -------------')
    for k, v in sorted(args.items()):
        print('%s: %s' % (str(k), str(v)))
    print('-------------- End ----------------')

    # save to the disk
    expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
    util.mkdirs(expr_dir)
    if save and not self.opt.continue_train:
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
    return self.opt

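# Behavior note for the gpu_ids parsing above: '--gpu_ids 0,1' yields
# opt.gpu_ids == [0, 1], while the conventional CPU-only flag '--gpu_ids -1'
# yields [] because negative ids are filtered out.
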
def parse(self):
    if not self.initialized:
        self.initialize()
    self.opt = self.parser.parse_args()
    self.opt.isTrain = self.isTrain  # train or test
    self.opt.name = self.name  # experiment name

    # str_ids = self.opt.gpu_ids.split(',')
    # self.opt.gpu_ids = []
    # for str_id in str_ids:
    #     id = int(str_id)
    #     if id >= 0:
    #         self.opt.gpu_ids.append(id)

    args = vars(self.opt)
    print('------------ Options -------------')
    for k, v in sorted(args.items()):
        print('%s: %s' % (str(k), str(v)))
    print('-------------- End ----------------')

    # save to the disk
    expr_dir = os.path.join(self.opt.checkpoint_dir, self.name)
    util.mkdirs(expr_dir)
    file_name = os.path.join(expr_dir, 'opt.txt')
    with open(file_name, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for k, v in sorted(args.items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')
    return self.opt

def __init__(self, opt):
    self.opt = opt
    self.use_html = opt.use_html
    self.tf_log = opt.isTrain and not opt.use_html
    self.win_size = opt.display_winsize
    self.name = opt.name
    self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
    util.mkdir(self.log_dir)

    # if using tensorboard
    if self.tf_log:
        import tensorflow as tf
        self.tf = tf
        self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
        self.writer = tf.summary.FileWriter(self.log_dir)

    # if using simple html page
    if self.use_html:
        self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        util.mkdirs([self.web_dir, self.img_dir])

    # save test image results
    if not opt.isTrain:
        self.test_dir = os.path.join(opt.checkpoints_dir, opt.name, 'test_img_out')
        util.mkdirs([self.test_dir])

    # log txt file head
    self.log_name_txt = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
    with open(self.log_name_txt, "a") as log_file:
        now = time.strftime("%c")
        log_file.write('================ Training Loss (%s) ================\n' % now)

    # log csv file head
    header = ['epoch', 'iters', 'time', 'loss_G', 'loss_D']
    self.log_name_csv = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.csv')
    with open(self.log_name_csv, "w") as log_file:
        writer = csv.writer(log_file, delimiter=',')
        writer.writerow(header)

    # log csv file head for individual losses
    self.log_individual_csv = os.path.join(opt.checkpoints_dir, opt.name,
                                           'loss_individual_log.csv')
    with open(self.log_individual_csv, "w") as log_file:
        writer = csv.writer(log_file, delimiter=',')
        writer.writerow(header)

    # save loss graphs as png
    self.error_plot = os.path.join(opt.checkpoints_dir, opt.name, 'error_plot.png')
    self.individual_error_plot = os.path.join(opt.checkpoints_dir, opt.name,
                                              'individual_error_plot.png')

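# Hedged companion sketch (not in the original class): appending one row to
# loss_log.csv, matching the header ['epoch', 'iters', 'time', 'loss_G',
# 'loss_D'] written above. The `losses` dict keys are assumptions.
def log_losses_csv(self, epoch, iters, elapsed, losses):
    with open(self.log_name_csv, 'a', newline='') as log_file:
        writer = csv.writer(log_file, delimiter=',')
        writer.writerow([epoch, iters, elapsed,
                         losses.get('loss_G'), losses.get('loss_D')])
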
def resume_logger(opt):
    # parser = argparse.ArgumentParser()
    # parser.add_argument(
    #     '-opt', type=str, required=True, help='Path to option JSON file.')
    # opt = option.parse(json_path, is_train=True)
    # opt = option.dict_to_nonedict(
    #     opt)  # Convert to NoneDict, which returns None for missing keys.

    # train from scratch OR resume training
    if opt['path']['resume_state']:  # resuming training
        resume_state = torch.load(opt['path']['resume_state'])
    else:  # training from scratch
        resume_state = None
        util.mkdir_and_rename(
            opt['path']['experiments_root'])  # rename old folder if exists
        util.mkdirs((path for key, path in opt['path'].items()
                     if not key == 'experiments_root'
                     and 'pretrain_model' not in key
                     and 'resume' not in key))

    # config loggers. Before it, the log will not work
    util.setup_logger(None, opt['path']['log'], 'train',
                      level=logging.INFO, screen=True)
    util.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO)
    logger = logging.getLogger('base')
    return resume_state, logger

def parse(self):
    if not self.initialized:
        self.initialize()
    self.opt = self.parser.parse_args()

    # str_ids = self.opt.gpu_ids.split(',')
    # self.opt.gpu_ids = []
    # for str_id in str_ids:
    #     id = int(str_id)
    #     if id >= 0:
    #         self.opt.gpu_ids.append(id)

    args = vars(self.opt)
    print('------------ Options -------------')
    for k, v in sorted(args.items()):
        print('%s: %s' % (str(k), str(v)))
    print('-------------- End ----------------')

    # save to the disk
    if self.opt.exp_id == '':
        print('Please set the experimental ID with option --exp_id')
        exit()
    exp_dir = os.path.join(self.opt.exp_dir, self.opt.exp_id)
    util.mkdirs(exp_dir)

    # normalize each resume prefix from 'name.pth' to 'name-'
    if self.opt.resume_prefix_pose != '':
        trunc_index = self.opt.resume_prefix_pose.index('pth')
        self.opt.resume_prefix_pose = self.opt.resume_prefix_pose[0:trunc_index - 1]
        self.opt.resume_prefix_pose += '-'
    if self.opt.resume_prefix_asn != '':
        trunc_index = self.opt.resume_prefix_asn.index('pth')
        self.opt.resume_prefix_asn = self.opt.resume_prefix_asn[0:trunc_index - 1]
        self.opt.resume_prefix_asn += '-'
    if self.opt.resume_prefix_dropout != '':
        trunc_index = self.opt.resume_prefix_dropout.index('pth')
        self.opt.resume_prefix_dropout = self.opt.resume_prefix_dropout[0:trunc_index - 1]
        self.opt.resume_prefix_dropout += '-'
    if self.opt.resume_prefix_aug != '':
        trunc_index = self.opt.resume_prefix_aug.index('pth')
        self.opt.resume_prefix_aug = self.opt.resume_prefix_aug[0:trunc_index - 1]
        self.opt.resume_prefix_aug += '-'

    # write the options into the experiment directory; the original wrote a
    # bare 'opt.txt' into whatever directory the script happened to run from
    with open(os.path.join(exp_dir, 'opt.txt'), 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for k, v in sorted(args.items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')
    return self.opt

def train(opt):
    # Prepare the training corpus
    print(options.TrainLogPrefix + "Prepare the training corpus begin!")
    from datasource.input_corpus import InputCorpus
    input_corpus = InputCorpus(opt.corpus_root, encoding=opt.encoding)
    print(options.TrainLogPrefix + "Prepare the training corpus end!")

    # Get the basic tfidf features
    print(options.TrainLogPrefix + "Get the basic tfidf features begin!")
    from feature.ngram_tfidf import NgramTfidf
    ngram_tfidf = NgramTfidf(input_corpus)
    ngram_tfidf.set_stopwords('./resource/stop_words_zh.utf8.txt')
    import numpy as np
    tfidf_mat, features = ngram_tfidf.get_tfidf_mat(top_k=opt.tfidf_top_k)
    tfidf_mat = np.asarray(tfidf_mat)
    features = np.asarray(features)
    targets = np.asarray(input_corpus.get_filenames_and_targets()[1])
    print(options.TrainLogPrefix + "Get the basic tfidf features end!")

    # Do feature selection
    print(options.TrainLogPrefix + "Do feature selection begin!")
    if opt.which_filter == 'mi':
        from feature.feature_selection import MISelection as FeatureSelection
        feature_selector = FeatureSelection(tfidf_mat, targets,
                                            mi_threshold=opt.mi_threshold)
    else:
        from feature.feature_selection import GBDTSelection as FeatureSelection
        feature_selector = FeatureSelection(tfidf_mat, targets)
    boolean_selection_index = feature_selector.get_boolean_selection_lst()
    filtered_tfidf_mat = tfidf_mat[:, boolean_selection_index]
    filtered_features = features[boolean_selection_index]
    print(options.TrainLogPrefix + "Do feature selection end!")

    # Training model
    print(options.TrainLogPrefix + "Training model begin!")
    if opt.which_classifier == 'svm':
        from model.classifier import SVMClassifier as Classifier
    else:
        from model.classifier import GBDTClassifier as Classifier
    classifier_model = Classifier()
    from model.classifier import Scorer
    scorer = Scorer(classifier_model.get_model(), filtered_tfidf_mat, targets)
    print(options.TrainLogPrefix + "Training model end!")
    scorer.show_score()

    # Save the model
    model_save_path = opt.path_to_save_model
    from utils import util
    util.mkdirs('/'.join(model_save_path.split('/')[:-1]))
    classifier_model.dump(filtered_tfidf_mat, targets, model_save_path)
    print(options.TrainLogPrefix + 'model save to ' + model_save_path)

    # Save the filtered features
    filtered_features_save_path = opt.path_to_save_model + options.FeaturesSaveSuffix
    df_vec = ngram_tfidf.numDocsContainingFeatures(filtered_features)
    save_features_df(df_vec, filtered_features, len(tfidf_mat),
                     filtered_features_save_path)

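# Hypothetical invocation sketch: the attribute names mirror exactly what
# train() reads above, but the values are illustrative placeholders.
class TrainOpt:
    corpus_root = './data/corpus'
    encoding = 'utf-8'
    tfidf_top_k = 20000
    which_filter = 'mi'       # anything else falls back to GBDT selection
    mi_threshold = 0.01
    which_classifier = 'svm'  # anything else falls back to GBDT
    path_to_save_model = './output/svm_model.pkl'

train(TrainOpt())
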
def _save(self, args):
    expr_dir = os.path.join(self._opt.checkpoints_dir, self._opt.name)
    print(expr_dir)
    util.mkdirs(expr_dir)
    file_name = os.path.join(
        expr_dir, 'opt_%s.txt' % ('train' if self.is_train else 'test'))
    with open(file_name, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for k, v in sorted(args.items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')

def initialize(self, opt):
    self.opt = opt
    self.training = opt.isTrain
    self.gpu_ids = opt.gpu_ids
    self.isTrain = opt.isTrain
    self.num_classes = opt.label_nc
    self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
    self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
    self.tensorborad_dir = os.path.join(self.opt.checkpoints_dir,
                                        'tensorboard', opt.dataset_mode)
    self.model_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name,
                                  'model')
    util.mkdirs([self.tensorborad_dir, self.model_dir])

def save_models(self, epoch, save_dir, fid=None, isbest=False, direction='AtoB'):
    util.mkdirs(save_dir)
    ckpt = {
        'G': self.netG.state_dict(),
        'D': self.netD.state_dict(),
        'epoch': epoch,
        'fid': fid
    }
    if isbest:
        torch.save(ckpt, os.path.join(save_dir, 'model_best_%s.pth' % direction))
    else:
        torch.save(ckpt, os.path.join(save_dir, 'model_%d.pth' % epoch))

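# Hedged counterpart sketch (not in the original class): restoring a
# checkpoint written by save_models above. Assumes the same netG/netD
# attributes and that self.device is set.
def load_models(self, load_path):
    ckpt = torch.load(load_path, map_location=self.device)
    self.netG.load_state_dict(ckpt['G'])
    self.netD.load_state_dict(ckpt['D'])
    return ckpt['epoch'], ckpt['fid']
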
def adaptive_personalize(opt, imitator, visualizer, output_path):
    output_dir = opt.output_dir
    mkdirs([os.path.join(output_dir, 'imgs'), os.path.join(output_dir, 'pairs')])

    # TODO check if it has been computed.
    print('\n\t\t\tPersonalization: meta imitation...')
    imitator.personalize(opt.src_path, visualizer=None, output_path=output_path)
    meta_imitate(opt, imitator, prior_tgt_path=opt.pri_path, visualizer=None,
                 save_imgs=True)

    # post tune
    print('\n\t\t\tPersonalization: meta cycle finetune...')
    loader = make_dataset(opt)
    imitator.post_personalize(opt.output_dir, loader, visualizer=None,
                              verbose=False)

def dir_check(opt):
    if opt['is_train']:
        # starting from scratch needs to create the training directory
        if not opt['path']['resume_state']:
            util.mkdir_and_rename(
                opt['path']['experiments_root'])  # rename old folder if exists
            util.mkdirs((path for key, path in opt['path'].items()
                         if not key == 'experiments_root'
                         and 'pretrain_model' not in key
                         and 'resume' not in key))
    else:
        # create testing directory
        util.mkdirs((path for key, path in opt['path'].items()
                     if not key == 'pretrain_model_G'))

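# Minimal example of the nested opt dict dir_check expects; the paths are
# illustrative placeholders, not from the original config. Note how the
# generator above skips 'experiments_root', 'pretrain_model*' and 'resume*'
# keys when creating directories.
opt = {
    'is_train': True,
    'path': {
        'experiments_root': 'experiments/my_run',
        'log': 'experiments/my_run/log',        # created
        'models': 'experiments/my_run/models',  # created
        'resume_state': None,                   # None -> fresh run
        'pretrain_model_G': None,               # skipped by the filter
    },
}
dir_check(opt)
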
def save_networks(self, epoch):
    mkdirs(self.save_dir)
    for name in self.model_names:
        if isinstance(name, str):
            save_filename = '%s_net_%s.pth' % (epoch, name)
            save_path = os.path.join(self.save_dir, save_filename)
            net = getattr(self, 'net' + name)
            if torch.cuda.is_available():
                torch.save(net.cpu().state_dict(), save_path)
                net.to(self.device)
            else:
                torch.save(net.cpu().state_dict(), save_path)

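# Hedged sketch of the matching restore step (the original class likely
# provides its own): rebuild each net from the '%s_net_%s.pth' files that
# save_networks writes above.
def load_networks(self, epoch):
    for name in self.model_names:
        if isinstance(name, str):
            load_filename = '%s_net_%s.pth' % (epoch, name)
            load_path = os.path.join(self.save_dir, load_filename)
            net = getattr(self, 'net' + name)
            state_dict = torch.load(load_path, map_location=self.device)
            net.load_state_dict(state_dict)
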
def test_pix2pix_mIoU(model, opt):
    opt.phase = 'val'
    opt.num_threads = 0
    opt.batch_size = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.load_size = 256
    opt.display_id = -1
    dataset = create_dataset(opt)
    model.model_eval()

    result_dir = os.path.join(opt.checkpoints_dir, opt.name, 'test_results')
    util.mkdirs(result_dir)

    fake_B = {}
    names = []
    for i, data in enumerate(dataset):
        model.set_input(data)
        with torch.no_grad():
            model.forward()
        visuals = model.get_current_visuals()
        fake_B[data['A_paths'][0]] = visuals['fake_B']
        for j in range(len(model.image_paths)):
            # index with the loop variable; the original always read
            # image_paths[0][0], which only worked because batch_size == 1
            short_path = ntpath.basename(model.image_paths[j][0])
            name = os.path.splitext(short_path)[0]
            if name not in names:
                names.append(name)
        util.save_images(visuals, model.image_paths, result_dir,
                         direction=opt.direction,
                         aspect_ratio=opt.aspect_ratio)

    drn_model = DRNSeg('drn_d_105', 19, pretrained=False).to(model.device)
    util.load_network(drn_model, opt.drn_path, verbose=False)
    drn_model.eval()

    mIoU = get_mIoU(list(fake_B.values()), names, drn_model, model.device,
                    table_path=os.path.join(opt.dataroot, 'table.txt'),
                    data_dir=opt.dataroot,
                    batch_size=opt.batch_size,
                    num_workers=opt.num_threads)
    return mIoU

def initialize(self, opt):
    TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S/}".format(datetime.now())
    self.opt = opt
    self.training = opt.isTrain
    self.gpu_ids = opt.gpu_ids
    self.isTrain = opt.isTrain
    self.num_classes = opt.label_nc
    self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
    self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
    self.tensorborad_dir = os.path.join(self.opt.checkpoints_dir,
                                        'tensorboard',
                                        opt.dataset_mode + TIMESTAMP)
    self.model_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name,
                                  'model')
    util.mkdirs([self.tensorborad_dir, self.model_dir])

def save_models(self, epoch, save_dir, fid=None, isbest=False, direction='AtoB'):
    util.mkdirs(save_dir)
    ckpt = {
        'G_A': self.__pop_ops_params_state_dict(self.netG_A.state_dict()),
        'G_B': self.__pop_ops_params_state_dict(self.netG_B.state_dict()),
        'D_A': self.netD_A.state_dict(),
        'D_B': self.netD_B.state_dict(),
        'epoch': epoch,
        'cfg': (self.cfg_AtoB, self.cfg_BtoA),
        'fid': fid
    }
    if isbest:
        torch.save(ckpt, os.path.join(save_dir, 'model_best_%s.pth' % direction))
    else:
        torch.save(ckpt, os.path.join(save_dir, 'model_%d.pth' % epoch))

def save_models(self, model, epoch, name, opt, isbest):
    save_dir = os.path.join(opt.checkpoints_dir, opt.name, 'checkpoints')
    utils.mkdirs(save_dir)
    ckpt = {
        'weight': model.module.state_dict(),
        'epoch': epoch,
        'cfg': opt.model,
        'index': name
    }
    if isbest:
        torch.save(ckpt, os.path.join(save_dir, 'model%s_best.pth' % name))
    else:
        torch.save(ckpt, os.path.join(save_dir, 'model%s_%d.pth' % (name, epoch)))

def meta_imitate(opt, imitator, prior_tgt_path, save_imgs=True, visualizer=None):
    src_path = opt.src_path
    all_tgt_paths = scan_tgt_paths(prior_tgt_path, itv=40)
    output_dir = opt.output_dir
    out_img_dir, out_pair_dir = mkdirs([os.path.join(output_dir, 'imgs'),
                                        os.path.join(output_dir, 'pairs')])

    img_pair_list = []
    for t in tqdm(range(len(all_tgt_paths))):
        tgt_path = all_tgt_paths[t]
        preds = imitator.inference([tgt_path], visualizer=visualizer,
                                   cam_strategy=opt.cam_strategy, verbose=False)

        tgt_name = os.path.split(tgt_path)[-1]
        out_path = os.path.join(out_img_dir, 'pred_' + tgt_name)

        if save_imgs:
            cv_utils.save_cv2_img(preds[0], out_path, normalize=True)
            write_pair_info(imitator.src_info, imitator.tsf_info,
                            os.path.join(out_pair_dir, '{:0>8}.pkl'.format(t)),
                            imitator=imitator, only_vis=opt.only_vis)
        img_pair_list.append((src_path, tgt_path))

    if save_imgs:
        write_pickle_file(os.path.join(output_dir, 'pairs_meta.pkl'),
                          img_pair_list)

def parse(self):
    if not self.initialized:
        self.initialize()
    self.opt = self.parser.parse_args()
    self.opt.isTrain = self.isTrain  # train or test
    args = vars(self.opt)

    if self.opt.dataset_mode == 'sample_per_vehicle':
        self.opt.frames_per_sample = max(self.opt.n_rgbs_per_sample,
                                         self.opt.n_bbs_per_sample)
        self.opt.input_channel_dim = (3 * self.opt.n_rgbs_per_sample
                                      + self.opt.n_bbs_per_sample)
        self.opt.train_frames_to_remove = int(
            self.opt.train_frames_per_scene - self.opt.ttc_threshold * 10
            - self.opt.frames_per_sample + 1)
        self.opt.valid_frames_to_remove = int(
            self.opt.valid_frames_per_scene - self.opt.ttc_threshold * 10
            - self.opt.frames_per_sample + 1)
        self.opt.test_frames_to_remove = int(
            self.opt.test_frames_per_scene - self.opt.ttc_threshold * 10
            - self.opt.frames_per_sample + 1)
    elif self.opt.dataset_mode == 'sample_per_frame':
        self.opt.input_channel_dim = 26
    else:
        # the original printed a message and then raised a bare ValueError;
        # putting the message in the exception keeps the information together
        raise ValueError('Unknown dataset mode: %s' % self.opt.dataset_mode)

    if self.opt.shuffle_by_which not in ['none', 'sample', 'frame']:
        raise ValueError('Unknown shuffling method: %s' % self.opt.shuffle_by_which)

    print('------------ Options -------------')
    for k, v in sorted(args.items()):
        print('%s: %s' % (str(k), str(v)))
    print('-------------- End ----------------')

    if self.isTrain:
        # save to the disk
        expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write('------------ Options -------------\n')
            for k, v in sorted(args.items()):
                opt_file.write('%s: %s\n' % (str(k), str(v)))
            opt_file.write('-------------- End ----------------\n')
    return self.opt

def __init__(self, opt):
    # self.opt = opt
    self.tf_log = opt.tf_log
    self.use_html = opt.isTrain and not opt.no_html
    self.win_size = opt.display_winsize
    self.name = opt.name
    if self.tf_log:
        import tensorflow as tf
        self.tf = tf
        self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
        self.writer = tf.summary.FileWriter(self.log_dir)
    if self.use_html:
        self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
        self.img_dir = os.path.join(self.web_dir, 'images')
        print('create web directory %s...' % self.web_dir)
        util.mkdirs([self.web_dir, self.img_dir])

def setup_logging(opt, resume_state, rank):
    tb_logger = None
    if rank <= 0:  # normal training (rank -1) OR distributed training (rank 0)
        if resume_state is None:
            util.mkdir_and_rename(
                opt['path']['experiments_root'])  # rename experiment folder if exists
            util.mkdirs((path for key, path in opt['path'].items()
                         if not key == 'experiments_root'
                         and 'pretrain_model' not in key
                         and 'resume' not in key))

        # config loggers. Before it, the log will not work
        util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'],
                          level=logging.INFO, screen=True, tofile=True)
        util.setup_logger('val', opt['path']['log'], 'val_' + opt['name'],
                          level=logging.INFO, screen=True, tofile=True)
        logger = logging.getLogger('base')
        logger.info(option.dict2str(opt))

        # tensorboard logger
        if opt['use_tb_logger'] and 'debug' not in opt['name']:
            version = float(torch.__version__[0:3])
            if version >= 1.1:  # PyTorch 1.1
                from torch.utils.tensorboard import SummaryWriter
            else:
                logger.info('You are using PyTorch {}. '
                            'Tensorboard will use [tensorboardX]'.format(version))
                from tensorboardX import SummaryWriter
            tb_logger = SummaryWriter(log_dir='../tb_logger/' + opt['name'])
    else:
        util.setup_logger('base', opt['path']['log'], 'train',
                          level=logging.INFO, screen=True)
        logger = logging.getLogger('base')
    return logger, tb_logger

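# Hedged usage sketch: wiring setup_logging into a training entry point.
# rank follows the convention noted above (-1 = single process, 0 = master
# in distributed runs); the resume_state handling matches resume_logger
# earlier in this section.
resume_state = (torch.load(opt['path']['resume_state'])
                if opt['path']['resume_state'] else None)
logger, tb_logger = setup_logging(opt, resume_state, rank=-1)
logger.info('logging initialised')
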
def check_args(args):
    args.isTrain = args.phase == 'train'

    str_ids = args.gpu_ids.split(',')
    args.gpu_ids = []
    for str_id in str_ids:
        id = int(str_id)
        if id >= 0:
            args.gpu_ids.append(id)
    if len(args.gpu_ids) > 0:
        torch.cuda.set_device(args.gpu_ids[0])

    # the original wrapped this assert in a bare try/except that only printed
    # a warning, silently letting an invalid batch size through
    assert args.batch_size >= 1, 'batch size must be larger than or equal to one'

    expr_dir = os.path.join(args.checkpoints_dir, args.name)
    util.mkdirs(expr_dir)
    return args

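# Hypothetical call site: check_args is meant to run right after argument
# parsing and before any CUDA work, since it pins the first visible GPU.
# `parser` is an assumed argparse.ArgumentParser built elsewhere.
args = check_args(parser.parse_args())
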
def save(self, args):
    expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
    print(expr_dir)
    util.mkdirs(expr_dir)
    file_name = os.path.join(
        expr_dir, 'opt_%s.txt' % ('train' if self.is_train else 'test'))
    file_name_yaml = os.path.join(
        expr_dir, 'opt_%s.yaml' % ('train' if self.is_train else 'test'))
    with open(file_name_yaml, 'w') as opt_file:
        yaml.dump(args, opt_file)
    with open(file_name, 'wt') as opt_file:
        opt_file.write('------------ Options -------------\n')
        for k, v in sorted(args.items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('-------------- End ----------------\n')
    file_name = os.path.join(expr_dir, 'command_line.txt')
    with open(file_name, 'wt') as opt_file:
        opt_file.write(" ".join(sys.argv))

def save_models(self, epoch, save_dir, fid=None, isbest=False, direction='AtoB'):
    util.mkdirs(save_dir)
    ckpt = {
        'G': self.__pop_ops_params_state_dict(self.netG.state_dict()),
        'D': self.netD.state_dict(),
        'epoch': epoch,
        'cfg': (self.filter_cfgs, self.channel_cfgs),
        'fid': fid
    }
    if isbest:
        torch.save(ckpt, os.path.join(save_dir, 'model_best_%s.pth' % direction))
    else:
        torch.save(ckpt, os.path.join(save_dir, 'model_%d.pth' % epoch))

def print_options(self, opt):
    message = ''
    message += '----------------- Options ---------------\n'
    for k, v in sorted(vars(opt).items()):
        comment = ''
        default = self.parser.get_default(k)
        if v != default:
            comment = '\t[default: %s]' % str(default)
        message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
    message += '----------------- End -------------------'
    print(message)

    # save to the disk
    expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
    util.mkdirs(expr_dir)
    file_name = os.path.join(expr_dir, 'opt.txt')
    with open(file_name, 'wt') as opt_file:
        opt_file.write(message)
        opt_file.write('\n')

def save_img_metric(self, img_dict, path, model_name, phase):
    util.mkdirs(os.path.join(path, model_name, phase, 'A2B', 'depth'))
    # util.mkdirs(os.path.join(path, model_name, phase, 'A2B', 'normal'))
    util.mkdirs(os.path.join(path, model_name, phase, 'B2A', 'depth'))
    # util.mkdirs(os.path.join(path, model_name, phase, 'B2A', 'normal'))
    # util.mkdirs(os.path.join(path, model_name, phase, 'A2B2A', 'depth'))
    # util.mkdirs(os.path.join(path, model_name, phase, 'A2B2A', 'normal'))
    # util.mkdirs(os.path.join(path, model_name, phase, 'B2A2B', 'depth'))
    # util.mkdirs(os.path.join(path, model_name, phase, 'B2A2B', 'normal'))

    B_depth_fake = util.tensor2mm(img_dict['fake_depth_B'], self.opt)
    A_name = img_dict['name_A']
    A_depth_fake = util.tensor2mm(img_dict['fake_depth_A'], self.opt)
    B_name = img_dict['name_B']

    for i in range(B_depth_fake.shape[0]):
        imageio.imwrite(os.path.join(path, model_name, phase, 'A2B', 'depth',
                                     A_name[i] + '.png'), B_depth_fake[i])
        # imageio.imwrite(os.path.join(path, model_name, phase, 'A2B2A', 'depth',
        #                              A_name[i] + '.png'),
        #                 A_rec[i].astype(np.uint16))
        # np.save(os.path.join(path, model_name, phase, 'A2B', 'normal',
        #                      A_name[i] + '.npy'),
        #         util.get_normal_metric(B_depth_fake[i]))
        imageio.imwrite(os.path.join(path, model_name, phase, 'B2A', 'depth',
                                     B_name[i] + '.png'), A_depth_fake[i])

def save_img(self, img_dict, path, model_name, phase):
    util.mkdirs(os.path.join(path, model_name, phase, 'A'))
    util.mkdirs(os.path.join(path, model_name, phase, 'B'))

    A_imgs = util.tensor2im(img_dict['real_img_A'], self.opt, isDepth=False)
    A_depth = util.tensor2im(img_dict['real_depth_A'], self.opt, isDepth=True) * 1000
    B_depth_fake = util.tensor2im(img_dict['fake_depth_B'], self.opt, isDepth=True) * 1000
    A_name = img_dict['name_A']
    B_imgs = util.tensor2im(img_dict['real_img_B'], self.opt, isDepth=False)
    B_depth = util.tensor2im(img_dict['real_depth_B'], self.opt, isDepth=True) * 1000
    A_depth_fake = util.tensor2im(img_dict['fake_depth_A'], self.opt, isDepth=True) * 1000
    B_name = img_dict['name_B']

    for i in range(A_imgs.shape[0]):
        imageio.imwrite(os.path.join(path, model_name, phase, 'A',
                                     A_name[i] + '_img.png'), A_imgs[i])
        imageio.imwrite(os.path.join(path, model_name, phase, 'A',
                                     A_name[i] + '_depth.png'),
                        A_depth[i].astype(np.uint16))
        imageio.imwrite(os.path.join(path, model_name, phase, 'A',
                                     A_name[i] + '_depth_fake.png'),
                        B_depth_fake[i].astype(np.uint16))
        imageio.imwrite(os.path.join(path, model_name, phase, 'B',
                                     B_name[i] + '_img.png'), B_imgs[i])
        imageio.imwrite(os.path.join(path, model_name, phase, 'B',
                                     B_name[i] + '_depth.png'),
                        B_depth[i].astype(np.uint16))
        imageio.imwrite(os.path.join(path, model_name, phase, 'B',
                                     B_name[i] + '_depth_fake.png'),
                        A_depth_fake[i].astype(np.uint16))

def get_options(json_path):
    """options"""
    # parser = argparse.ArgumentParser()
    # parser.add_argument(
    #     '-opt', type=str, required=True, help='Path to options JSON file.')
    # opt = option.parse(parser.parse_args().opt, is_train=False)
    is_train = False
    opt = option.parse(json_path, is_train)
    util.mkdirs((path for key, path in opt['path'].items()
                 if not key == 'pretrain_model_G'))
    opt = option.dict_to_nonedict(opt)

    util.setup_logger(None, opt['path']['log'], 'test',
                      level=logging.INFO, screen=True)
    logger = logging.getLogger('base')
    logger.info(option.dict2str(opt))
    return opt, logger

def print_options(opt):
    """print and save options"""
    # print('--------------Options--------------')
    # for k, v in sorted(vars(opt).items()):
    #     print('%s: %s' % (str(k), str(v)))
    # print('----------------End----------------')

    # save to the disk
    expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
    util.mkdirs(expr_dir)
    if opt.isTrain:
        file_name = os.path.join(expr_dir, 'train_opt.txt')
    else:
        file_name = os.path.join(expr_dir, 'test_opt.txt')
    with open(file_name, 'wt') as opt_file:
        opt_file.write('--------------Options--------------\n')
        for k, v in sorted(vars(opt).items()):
            opt_file.write('%s: %s\n' % (str(k), str(v)))
        opt_file.write('----------------End----------------\n')