def paint(self, img_file, save_path, nsampling):
    """Inpaint every image in the module-level ``image`` list at several mask
    widths and save one composite grid per source image.

    Fixes over the previous revision:
      * the local path no longer shadows the ``img_file`` parameter (which is
        never read before being overwritten -- kept only for interface
        compatibility);
      * unused ``enumerate`` index and a leftover ``# embed()`` debug hook
        removed.

    Args:
        img_file: unused; retained so existing callers keep working.
        save_path: directory handed to ``self.model.test`` for its outputs.
        nsampling: number of samples drawn per input by ``self.model.test``.
    """
    # NOTE(review): `image` and `image_root` are free names (module-level,
    # presumably a filename list and its root directory) -- confirm upstream.
    for c, img in enumerate(image):
        save = []
        img_path = os.path.join(image_root, img)
        for mask_width in [16, 32, 48, 64]:
            dataset = data_loader.dataloader(self.opt, img_path, mask_width=mask_width)
            for data in islice(dataset, self.opt.how_many):
                self.model.set_input(data)
                # Masked ground truth first, then the model's sampled completions.
                out = [(normalize(data['img']) * data['mask']).cuda()]
                out.extend(self.model.test(save_path, nsampling))
                save.extend(out)
        save_img_test(torch.cat(save), f"pretrained_task3_img{c}.png")
"""Test-time driver: load options, run the model over the whole test set, and
dump results into an HTML results page."""
import os
from options.test_options import TestOptions
from dataloader.data_loader import dataloader
from model.models import create_model
from util.visualizer import Visualizer
from util import html

# Parse test-time options and build the dataset they describe.
opt = TestOptions().parse()
dataset = dataloader(opt)
# dataloader length is in batches; multiply by batch size for an image count.
dataset_size = len(dataset) * opt.batchSize
print('testing images = %d ' % dataset_size)

model = create_model(opt)
visualizer = Visualizer(opt)

# Results page lives under <results_dir>/<name>/<phase>_<epoch>.
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
web_page = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' % (opt.name, opt.phase, opt.which_epoch))

# testing: one forward pass per batch, results appended to the HTML page.
for i, data in enumerate(dataset):
    model.set_input(data)
    model.test()
    model.save_results(visualizer, web_page)
--img_target_file ../../data/akada/datasets/unreal2nyu/trainB \
--lab_source_file ../../data/akada/datasets/unreal2nyu/trainA_depth \
--lab_target_file ../../data/akada/datasets/unreal2nyu/trainB_depth \
--gpu_ids 1 --shuffle --flip --rotation --no_html --display_id -1 --norm instance
"""
import time
from options.train_options import TrainOptions
from dataloader.data_loader import dataloader
from model.models import create_model
from util.visualizer import Visualizer

# Parse training options and build the main training dataset.
opt = TrainOptions().parse()
dataset = dataloader(opt)
# dataloader length is in batches; multiply by batch size for an image count.
dataset_size = len(dataset) * opt.batch_size
print('training images = %d' % dataset_size)

# create datasets for Gaussian Process
# Only built when --gp is set; otherwise the model receives None for both.
labeled_dataset = None
unlabeled_dataset = None
if opt.gp:
    labeled_dataset, unlabeled_dataset = dataloader(opt, gp=True)
    print('The number of labeled training images for GP = %d' % len(labeled_dataset))
    print('The number of unlabeled training images for GP = %d' % len(unlabeled_dataset))

model = create_model(opt, labeled_dataset, unlabeled_dataset)
visualizer = Visualizer(opt)
# python test.py --name wordattninpainting --img_file datasets/CUB_200_2011/valid.flist --results_dir results/wordattninpainting --how_many 200 --mask_file datasets/CUB_200_2011/test_mask.flist --mask_type 3 --no_shuffle --gpu_ids 0 --nsampling 1 from options import test_options from dataloader import data_loader from model import create_model from util import visualizer import torch import os if __name__=='__main__': # get testing options opt = test_options.TestOptions().parse() # creat a dataset dataset = data_loader.dataloader(opt) dataset_size = len(dataset) * opt.batchSize print('testing images = %d' % dataset_size) # create a model model = create_model(opt) model.eval() # create a visualizer visualizer = visualizer.Visualizer(opt) for i, data in enumerate(dataset): with torch.no_grad(): model.set_input(data) model.test() truths = [] for file in os.listdir(opt.results_dir): if file.endswith('_truth.png'): truths.append(opt.results_dir + '/' + file)
def paint(self, img_file, save_path, nsampling):
    """Run the model over at most ``self.opt.how_many`` batches from
    ``img_file`` and let ``self.model.test`` write its samples to
    ``save_path``.

    Args:
        img_file: image list/path forwarded to the project dataloader.
        save_path: output directory passed through to ``self.model.test``.
        nsampling: number of samples drawn per input by ``self.model.test``.
    """
    batches = data_loader.dataloader(self.opt, img_file)
    # Cap the run at how_many batches without materializing the loader.
    for batch in islice(batches, self.opt.how_many):
        self.model.set_input(batch)
        self.model.test(save_path, nsampling)
if __name__ == '__main__':
    # get testing options
    opt = test_options.TestOptions().parse()
    # create input images directory: stage exactly two input images under a
    # fresh 'nose_test' folder, emptying it first if it already has files.
    opt.img_file = 'nose_test'
    util.mkdir(opt.img_file)
    # NOTE(review): this exists/makedirs check looks redundant after
    # util.mkdir above (the directory should already exist, so only the
    # else-branch cleanup runs) -- confirm util.mkdir's semantics before
    # simplifying.
    if not os.path.exists(opt.img_file):
        os.makedirs(opt.img_file)
    else:
        for f in glob.glob(os.path.join(opt.img_file, '*')):
            os.remove(f)
    # Copy the two user-supplied images into the staging directory,
    # keeping only their basenames.
    copyfile(opt.image1, os.path.join(opt.img_file, os.path.split(opt.image1)[-1]))
    copyfile(opt.image2, os.path.join(opt.img_file, os.path.split(opt.image2)[-1]))
    dataset = data_loader.dataloader(opt, True)
    # dataloader length is in batches; multiply by batch size for an image count.
    dataset_size = len(dataset) * opt.batchSize
    print('testing images = %d' % dataset_size)
    # create a model (eval mode: no dropout/batch-norm updates)
    model = create_model(opt)
    model.eval()
    # create a visualizer
    visualizer = visualizer.Visualizer(opt)
    # Run at most how_many batches through the model.
    for i, data in enumerate(islice(dataset, opt.how_many)):
        model.set_input(data)
        model.test()