                                  prior_tgt_path=opt.pri_path, visualizer=None, save_imgs=True)

    # post tune
    print('\n\t\t\tPersonalization: meta cycle finetune...')
    loader = make_dataset(opt)
    imitator.post_personalize(opt.output_dir, loader, visualizer=None, verbose=False)


if __name__ == "__main__":
    # meta imitator
    test_opt = TestOptions().parse()

    if test_opt.ip:
        visualizer = VisdomVisualizer(env=test_opt.name, ip=test_opt.ip, port=test_opt.port)
    else:
        visualizer = None

    # set imitator
    imitator = Imitator(test_opt)

    if test_opt.post_tune:
        adaptive_personalize(test_opt, imitator, visualizer)

    imitator.personalize(test_opt.src_path, visualizer=visualizer)
import os
import glob
import subprocess

import numpy as np
import cv2

from options.test_options import TestOptions
from model.net import InpaintingModel_GMCNN
from util.utils import generate_rect_mask, generate_stroke_mask, getLatest

# Pick the GPU with the most free memory, e.g.:
# os.environ['CUDA_VISIBLE_DEVICES'] = str(np.argmax([int(x.split()[2]) for x in subprocess.Popen(
#     "nvidia-smi -q -d Memory | grep -A4 GPU | grep Free",
#     shell=True, stdout=subprocess.PIPE).stdout.readlines()]))

config = TestOptions().parse()

if os.path.isfile(config.dataset_path):
    pathfile = open(config.dataset_path, 'rt').read().splitlines()
elif os.path.isdir(config.dataset_path):
    pathfile = glob.glob(os.path.join(config.dataset_path, '*.png'))
else:
    print('Invalid testing data file/folder path.')
    exit(1)

total_number = len(pathfile)
test_num = total_number if config.test_num == -1 else min(total_number, config.test_num)
print('The total number of testing images is {}, and we take {} for test.'.format(total_number, test_num))

print('configuring model..')
ourModel = InpaintingModel_GMCNN(in_channels=4, opt=config)
ourModel.print_networks()
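# The commented-out lines above sketch a trick for picking the GPU with the most
# free memory by parsing `nvidia-smi`. A runnable version of the same idea: a
# minimal sketch assuming `nvidia-smi` is on the PATH and its `-q -d Memory`
# report lists free memory as 'Free : <n> MiB'; the helper name is ours:
import os
import subprocess

def select_freest_gpu():
    """Point CUDA_VISIBLE_DEVICES at the GPU with the most free memory.

    Must run before torch initializes CUDA.
    """
    report = subprocess.run(
        "nvidia-smi -q -d Memory | grep -A4 GPU | grep Free",
        shell=True, capture_output=True, text=True).stdout.splitlines()
    free_mib = [int(line.split()[2]) for line in report]  # e.g. 'Free : 11170 MiB'
    os.environ['CUDA_VISIBLE_DEVICES'] = str(free_mib.index(max(free_mib)))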
"""Now let's download the pretrained models for males and females.""" !python download_models.py """Here, we import libraries and set options.""" import os from collections import OrderedDict from options.test_options import TestOptions from data.data_loader import CreateDataLoader from models.models import create_model import util.util as util from util.visualizer import Visualizer opt = TestOptions().parse(save=False) opt.display_id = 0 # do not launch visdom opt.nThreads = 1 # test code only supports nThreads = 1 opt.batchSize = 1 # test code only supports batchSize = 1 opt.serial_batches = True # no shuffle opt.no_flip = True # no flip opt.in_the_wild = True # This triggers preprocessing of in the wild images in the dataloader opt.traverse = True # This tells the model to traverse the latent space between anchor classes opt.interp_step = 0.05 # this controls the number of images to interpolate between anchor classes """Don't worry about this message above, ``` ipykernel_launcher.py: error: unrecognized arguments: -f /root/.local/share/jupyter/runtime/kernel-c9d47a98-bdba-4a5f-9f0a-e1437c7228b6.json ```
import os

from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import save_images
from util import html

if __name__ == '__main__':
    opt = TestOptions().parse()
    # hard-code some parameters for test
    opt.num_threads = 0        # test code only supports num_threads = 0
    opt.batch_size = 1         # test code only supports batch_size = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True         # no flip
    opt.display_id = -1        # no visdom display

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    model.setup(opt)

    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.epoch))
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
                        (opt.name, opt.phase, opt.epoch))

    # Test with eval mode. This only affects layers like batchnorm and dropout.
    # pix2pix: batchnorm and dropout are used in the original pix2pix; you can
    # experiment with and without eval() mode.
    # CycleGAN: eval() should not affect CycleGAN, since it uses instancenorm
    # without dropout.
    if opt.eval:
        model.eval()
    # added for cityscapes
        opt.load_pretrainB = os.path.join(opt.checkpoints_dir, "VAE_B_quality")

    if opt.Scratch_and_Quality_restore:
        opt.NL_res = True
        opt.use_SN = True
        opt.correlation_renormalize = True
        opt.NL_use_mask = True
        opt.NL_fusion_method = "combine"
        opt.non_local = "Setting_42"
        opt.name = "mapping_scratch"
        opt.load_pretrainA = os.path.join(opt.checkpoints_dir, "VAE_A_quality")
        opt.load_pretrainB = os.path.join(opt.checkpoints_dir, "VAE_B_scratch")


if __name__ == "__main__":
    opt = TestOptions().parse(save=False)
    parameter_set(opt)

    model = Pix2PixHDModel_Mapping()
    model.initialize(opt)
    model.eval()

    # create the output directories if they do not exist yet
    for sub_dir in ("input_image", "restored_image", "origin"):
        out_dir = os.path.join(opt.outputs_dir, sub_dir)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    dataset_size = 0
from __future__ import division

import sys
import time
import random
import json

import numpy as np
import torch
from torch.autograd import Variable

import models.networks
from options.test_options import TestOptions
from data.data_loader import *
from models.models import create_model

torch.manual_seed(0)

opt = TestOptions().parse()  # set CUDA_VISIBLE_DEVICES before import torch

root = '/home/zl548/'
local_dir = root + '/Crowdsampling-the-Plenoptic-Function/'
opt.root = root
opt.local_dir = local_dir

if opt.dataset == 'trevi':
    scene_id = 36
elif opt.dataset == 'pantheon':
    scene_id = 23
elif opt.dataset == 'coeur':
    scene_id = 13
elif opt.dataset == 'rock':
import os

import numpy as np
import scipy.signal
import torch.nn as nn
from torch.autograd import Variable
from skimage import morphology
from PIL import Image

# project-specific helpers (TestOptions, CreateModel, CreateTrgDataLoader,
# get_bool, get_largest_fillhole, calculate_dice) come from the original
# file's remaining imports


def main():
    opt = TestOptions()
    args = opt.initialize()

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    model = CreateModel(args)
    model.eval()
    model.cuda()

    targetloader = CreateTrgDataLoader(args)
    id_to_trainid = {2: 0, 1: 128, 0: 255}

    for index, batch in enumerate(targetloader):
        if index % 10 == 0:
            print('%d processed' % index)
        image, _, name = batch
        _, output, _, _ = model(Variable(image).cuda())  # [1, 3, 129, 129]
        # import pdb; pdb.set_trace()
        output = nn.functional.softmax(output, dim=1)
        # nn.functional.upsample is deprecated; interpolate is equivalent here
        output = nn.functional.interpolate(
            output, (1634, 1634), mode='bilinear',
            align_corners=True).cpu().data[0].numpy()
        output = output.transpose(1, 2, 0)  # (1634, 1634, 3)
        '''
        output_crop = output[14:526, 626:1138, 0:2]
        np.save("/extracephonline/medai_data2/zhengdzhang/eyes/qikan/cai/output_crop_1.npy", output_crop)
        #crop_img = Image.fromarray(output_crop)
        #crop_img.save("/extracephonline/medai_data2/zhengdzhang/eyes/qikan/cai/crop_img.png")
        '''
        output_nomask = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)  # (1634, 1634), unique: [0, 1, 2]

        # class 0 is the cup; the disc covers classes 0 and 1
        cup_mask = get_bool(output_nomask, 0)
        disc_mask = get_bool(output_nomask, 0) + get_bool(output_nomask, 1)
        disc_mask = disc_mask.astype(np.uint8)
        cup_mask = cup_mask.astype(np.uint8)

        # clean up the masks: repeated median filtering, then erosion,
        # hole filling, and dilation
        for i in range(5):
            disc_mask = scipy.signal.medfilt2d(disc_mask, 19)
            cup_mask = scipy.signal.medfilt2d(cup_mask, 19)
        disc_mask = morphology.binary_erosion(disc_mask, morphology.diamond(7)).astype(np.uint8)  # returns 0/1
        cup_mask = morphology.binary_erosion(cup_mask, morphology.diamond(7)).astype(np.uint8)    # returns 0/1
        disc_mask = get_largest_fillhole(disc_mask)
        cup_mask = get_largest_fillhole(cup_mask)
        disc_mask = morphology.binary_dilation(disc_mask, morphology.diamond(7)).astype(np.uint8)  # returns 0/1
        cup_mask = morphology.binary_dilation(cup_mask, morphology.diamond(7)).astype(np.uint8)    # returns 0/1
        disc_mask = get_largest_fillhole(disc_mask).astype(np.uint8)  # returns 0/1
        cup_mask = get_largest_fillhole(cup_mask).astype(np.uint8)

        output_nomask = disc_mask + cup_mask
        output_col = np.ones(output_nomask.shape, dtype=np.float32)
        for k, v in id_to_trainid.items():
            output_col[output_nomask == k] = v
        output_col = Image.fromarray(output_col.astype(np.uint8))
        output_nomask = Image.fromarray(output_nomask)

        name = name[0].split('.')[0] + '.png'
        output_nomask.save('%s/%s' % (args.save, name))
        output_col.save('%s/color_%s' % (args.save, name))

    disc_dice, cup_dice = calculate_dice(args.gt_dir, args.save, args.devkit_dir)
    print('===> disc_dice:' + str(round(disc_dice, 3)) + '\t' + 'cup_dice:' + str(round(cup_dice, 3)))
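# `get_bool` is a project helper not shown in this fragment. From its usage
# (class-0 mask for the cup, class-0 plus class-1 masks for the disc) it
# plausibly returns a boolean mask of pixels equal to a given class id.
# A minimal sketch under that assumption:
import numpy as np

def get_bool(label_map, class_id):
    """Hypothetical reconstruction: True where label_map equals class_id."""
    return label_map == class_id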
import os
import ntpath

from PIL import Image

from options.test_options import TestOptions
from data.custom_dataset_dataloader import CreateDataLoader
from model.pixelization_model import PixelizationModel


def save_image(image_numpy, image_path):
    image_pil = Image.fromarray(image_numpy.astype('uint8'))
    image_pil.save(image_path)


opt = TestOptions().parse()
opt.batchSize = 1

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = PixelizationModel()
model.initialize(opt)

for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    model.set_input(data)
    model.test()
    img_path = model.get_image_paths()
    img_dir = os.path.join(opt.results_dir, '%s_%s' % (opt.phase, opt.which_epoch))
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
import os
from collections import OrderedDict

import torch
from torch.autograd import Variable

from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html

if __name__ == "__main__":
    opt = TestOptions().parse(save=False)
    opt.nThreads = 1           # test code only supports nThreads = 1
    opt.batchSize = 1          # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True         # no flip
    # add instance_feat to control image generation
    opt.instance_feat = True
    # opt.use_encoded_image = True

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    visualizer = Visualizer(opt)

    # create website
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
import sys

import numpy as np
import torch
from torch.autograd import Variable

from options.test_options import TestOptions  # assumed location, matching the other test scripts
from CIFAR_10.models import *

seed_list = [10086]
ACC_list_l = []
# forward_list = ['IMPLICIT', 'EXPLICIT-CE-EQ', 'EXPLICIT-CE', 'EXPLICIT-RES']
forward_list = ['EXPLICIT-RES']

for se in range(len(forward_list)):
    torch.manual_seed(seed_list[0])
    np.random.seed(seed_list[0])

    # Extract the options
    opt = TestOptions().parse()

    # For testing the neural networks, manually edit/add options below
    opt.gan_mode = 'none'  # 'wgangp', 'lsgan', 'vanilla', 'none'

    # Set the input dataset
    opt.dataset_mode = 'CIFAR10'  # current dataset: CIFAR10, CelebA

    if opt.dataset_mode in ['CIFAR10', 'CIFAR100']:
        opt.n_layers_D = 3
        opt.n_downsample = 2  # downsample times
        opt.n_blocks = 2      # number of residual blocks
        opt.first_kernel = 5  # filter size of the first convolutional layer in the encoder
    # Initial learning rate
    elif opt.dataset_mode == 'CelebA':
        opt.n_layers_D = 3
import os
from collections import OrderedDict

import data
from options.test_options import TestOptions
from models.pix2pix_model import Pix2PixModel
from util.visualizer import Visualizer
from util import html

test_options = TestOptions()  # avoid shadowing the TestOptions class with its instance
opt = test_options.parse()

# single-process (non-distributed) inference
world_size = 1
rank = 0
opt.world_size = world_size
opt.gpu = 0
opt.mpdist = False
test_options.save_options(opt)

dataloader = data.create_dataloader(opt, world_size, rank)

model = Pix2PixModel(opt)
model.eval()

visualizer = Visualizer(opt, rank)

# create a webpage that summarizes all the results
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
# coding=utf-8
import pdb
import time
import sys
import json
import os

import torch
from tqdm import tqdm

from models import JLSModel
from datasets import VOC, Folder, ImageFiles
from evaluate_seg import evaluate_iou
from evaluate_sal import fm_and_mae
from options.test_options import TestOptions

opt = TestOptions()  # set CUDA_VISIBLE_DEVICES before import torch
opt.parser.set_defaults(model='JLSDeepLab')
opt.parser.set_defaults(phase='test2')
opt = opt.parse()

# home = os.path.expanduser("~")
home = "."
label = ""  # label of the model parameters to load

voc_train_img_dir = '%s/data/datasets/segmentation_Dataset/VOCdevkit/VOC2012/JPEGImages' % home
voc_train_gt_dir = '%s/data/datasets/segmentation_Dataset/VOCdevkit/VOC2012/SegmentationClassAug' % home
voc_val_img_dir = '%s/data/datasets/segmentation_Dataset/VOCdevkit/VOC2012/JPEGImages' % home
voc_val_gt_dir = '%s/data/datasets/segmentation_Dataset/VOCdevkit/VOC2012/SegmentationClass' % home
voc_train_split = '%s/data/datasets/segmentation_Dataset/VOCdevkit/VOC2012/ImageSets/Segmentation/argtrain.txt' % home
from options.test_options import TestOptions  # assumed location, matching the other test scripts
from data.data_loader import CreateDataLoader
import util.util as util
from util.visualizer import Visualizer

import os
import math
import shutil

import numpy as np
import scipy.io as sio
import torch
import torchvision
import torchvision.transforms as transforms
from pytorch_msssim import ssim, ms_ssim, SSIM, MS_SSIM

import models.channel as chan

# Extract the options
opt = TestOptions().parse()

# For testing the neural networks, manually edit/add options below
opt.gan_mode = 'none'  # 'wgangp', 'lsgan', 'vanilla', 'none'
opt.C_channel = 20     # output channel count of the encoder (important: it controls the rate)
opt.n_downsample = 2   # downsample times
opt.n_blocks = 2       # number of residual blocks
opt.first_kernel = 5   # filter size of the first convolutional layer in the encoder

# Set the input dataset
opt.dataset_mode = 'CIFAR10'  # current dataset: CIFAR10, CelebA

# Set up the testing procedure
opt.batchSize = 1  # batch size
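# `C_channel` is said to control the rate. A back-of-the-envelope count of the
# latent symbols per image, assuming (not confirmed by this fragment) that each
# of the n_downsample stages halves the spatial resolution of a 32x32 CIFAR-10
# input:
H = W = 32                       # CIFAR-10 resolution
n_downsample, C_channel = 2, 20  # values set above
feat = H >> n_downsample         # 32 / 2**2 = 8
print(C_channel * feat * feat)   # 20 * 8 * 8 = 1280 latent symbols per image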
import os

from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import save_images
from util import html

if __name__ == '__main__':
    opt = TestOptions().parse()
    opt.nThreads = 1           # test code only supports nThreads = 1
    opt.batchSize = 1          # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True         # no flip
    opt.display_id = -1        # no visdom display

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    model.setup(opt)

    # create website
    web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
                        (opt.name, opt.phase, opt.which_epoch))

    # test
    for i, data in enumerate(dataset):
        if i >= opt.how_many:
            break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        if i % 5 == 0:
import time
import os

from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util import html

opt = TestOptions().parse()
opt.nThreads = 1           # test code only supports nThreads = 1
opt.batchSize = 1          # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True         # no flip
opt.loadSize = 256

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
if opt.dataset_mode == 'labeled':
    opt.n_classes = data_loader.get_dataset().num_classes
model = create_model(opt)
visualizer = Visualizer(opt)

# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
                    (opt.name, opt.phase, opt.which_epoch))

# test
for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    model.set_input(data)
    model.test()
    visuals = model.get_current_visuals()
    lm = t68[48:49, :]  # landmark 48: left mouth corner
    rm = t68[54:55, :]  # landmark 54: right mouth corner
    t5 = np.concatenate([le, re, no, lm, rm], axis=0)
    t5 = t5.reshape(10)  # 5 points x 2 coordinates
    return t5


def save_img(img, save_path):
    image_numpy = util.tensor2im(img)
    util.save_image(image_numpy, save_path, create_dir=True)
    return image_numpy


if __name__ == '__main__':
    opt = TestOptions().parse()

    data_info = data.dataset_info()
    datanum = data_info.get_dataset(opt)[0]
    folderlevel = data_info.folder_level[datanum]
    dataloaders = data.create_dataloader_test(opt)

    visualizer = Visualizer(opt)
    iter_counter = IterationCounter(opt, len(dataloaders[0]) * opt.render_thread)
    # create a webpage that summarizes all the results

    testing_queue = Queue(10)

    ngpus = opt.device_count
def make_deerace_photo(project_name='deerace',
                       data_root_path='datasets/deerace/testA',
                       results_dir='./results/'):
    # if __name__ == '__main__':
    opt = TestOptions().parse()  # get test options

    # hard-code some parameters for test
    # ######## DeERace usage ########
    opt.no_dropout = True
    opt.model = 'test'
    opt.dataroot = data_root_path  # 'datasets/deerace/testA'
    opt.results_dir = results_dir  # './results/'
    opt.name = project_name       # 'deerace'
    # ######## DeERace usage ########
    opt.num_threads = 0        # test code only supports num_threads = 0
    opt.batch_size = 1         # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line out for results on randomly chosen images
    opt.no_flip = True         # no flip; comment this line out for results on flipped images
    opt.display_id = -1        # no visdom display; the test code saves the results to an HTML file

    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers

    # create a website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '{}_{}'.format(opt.phase, opt.epoch))  # define the website directory
    if opt.load_iter > 0:  # load_iter is 0 by default
        web_dir = '{:s}_iter{:d}'.format(web_dir, opt.load_iter)
    print('creating web directory', web_dir)
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
                        (opt.name, opt.phase, opt.epoch))

    # Test with eval mode. This only affects layers like batchnorm and dropout.
    # For [pix2pix]: batchnorm and dropout are used in the original pix2pix; you
    # can experiment with and without eval() mode.
    # For [CycleGAN]: eval() should not affect CycleGAN, since it uses
    # instancenorm without dropout.
    if opt.eval:
        model.eval()

    out_deerace_face_struct = []
    for i, data in enumerate(dataset):
        if i >= opt.num_test:  # only apply our model to opt.num_test images
            break
        model.set_input(data)  # unpack data from data loader
        model.test()           # run inference
        visuals = model.get_current_visuals()  # get image results
        img_path = model.get_image_paths()     # get image paths
        if i % 5 == 0:  # log progress while saving images to the HTML file
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_image_result = save_images(webpage, visuals, img_path,
                                        aspect_ratio=opt.aspect_ratio,
                                        width=opt.display_winsize)
        print(save_image_result)
        out_deerace_face_struct.append(save_image_result)
    webpage.save()  # save the HTML

    del model
    torch.cuda.empty_cache()
    return out_deerace_face_struct
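# A call-site sketch using the defaults from the signature above:
if __name__ == '__main__':
    face_structs = make_deerace_photo(project_name='deerace',
                                      data_root_path='datasets/deerace/testA',
                                      results_dir='./results/')
    print('processed %d images' % len(face_structs))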
import os
import time

import numpy as np

from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import Visualizer
from util import html
from util.measure_perceptual_loss import run_style_transfer

if __name__ == '__main__':
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle

    content_list = []
    style_list = []
    start_time = time.time()
    number_of_steps = 500

    for idxA in range(0, 50):
        # Load from the A dataset one image at a time
        model = create_model(opt)
        visualizer = Visualizer(opt)
        # create website
        web_dir = os.path.join(opt.results_dir, opt.name,
                               '%s_%s' % (opt.phase, opt.which_epoch))
        webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
                            (opt.name, opt.phase, opt.which_epoch))

        opt.start = idxA
        data_loader = CreateDataLoader(opt)
        dataset = data_loader.load_data()
        dataset_size = len(data_loader)
import os

from options.test_options import TestOptions
from data import CreateDataLoader
from models import create_model
from util.visualizer import save_images
from util import html

if __name__ == '__main__':
    opt = TestOptions().parse()
    opt.nThreads = 1           # test code only supports nThreads = 1
    opt.batchSize = 1          # test code only supports batchSize = 1
    opt.serial_batches = True  # no shuffle
    opt.no_flip = True         # no flip
    opt.display_id = -1        # no visdom display
    opt.loadSize = 256
    opt.fineSize = 256

    data_loader = CreateDataLoader(opt)
    dataset = data_loader.load_data()
    model = create_model(opt)
    model.setup(opt)

    # test
    for i, data in enumerate(dataset):
        # if i >= opt.how_many:
        #     break
        model.set_input(data)
        model.test()
        visuals = model.get_current_visuals()
        img_path = model.get_image_paths()
        if i % 5 == 0:
            print('processing (%04d)-th image... %s' % (i, img_path))
        save_images(visuals, img_path, opt.camA, opt.camB, opt.save_root)
import os
import ntpath

import numpy as np
import torch
import torch.nn
from torch.autograd import Variable
from sklearn.cluster import KMeans
import imageio
import librosa

from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from models.networks import quantizer
from models.Bpgan_VGG_Extractor import Bpgan_VGGLoss
from util.visualizer import Visualizer
from util.nnls import nnls

opt = TestOptions().parse(save=False)
opt.nThreads = 4
opt.batchSize = 128
opt.serial_batches = True  # no shuffle
opt.no_flip = True         # no flip
opt.quantize_type = 'scalar'
opt.model = "Bpgan_GAN"
how_many_infer = 20

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)

device = torch.device("cuda")
Critiretion = torch.nn.MSELoss().to(device)  # MSE reconstruction criterion
print("Image file not found or format is incorrect") return return tform(img).unsqueeze(0) def infer(model, data, device): data = data.to(device) with torch.no_grad(): output = model(data).squeeze() pred = output.argmax() prob = torch.exp(output.max()) return pred.item(), prob.item() # Test settings args = TestOptions().parse() # Configure GPU if not args.gpu_id < 0 and torch.cuda.is_available(): torch.cuda.set_device(args.gpu_id) device = torch.device("cuda") else: device = torch.device("cpu") # Load pretrained network model = CustomNet(args).to(device) model.load_network(args.checkpoints_dir, args.name, args.which_epoch) model.eval() # Preprocessing function tform = transforms.Compose(
from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
from util import html
from torch.autograd import Variable
from util.util import tensor2im, parsingim_2_tensor
import cv2 as cv

from models.geo.geo_API import GeoAPI
from models.geo.generate_theta_json_20channel_baseon_src_dst_path import generate_theta
from models.geo.geotnf.transformation import GeometricTnf
from data.utils import get_thetas_affgrid_tensor_by_json, get_parsing_label_tensor, get_label_tensor

opt = TestOptions().parse(save=False)
opt.nThreads = 1           # test code only supports nThreads = 1
opt.batchSize = 1          # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True         # no flip
opt.stage = 123            # choose stage_I_II_dataset.py

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()

# stage-I generator
opt.name = "stage_I_gan_ganFeat_noL1_oneD_Parsing_bz50_parsing20_04222"
opt.which_G = "resNet"
opt.stage = 1
opt.which_epoch = 100
model_1 = create_model(opt)
import os

import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from PIL import Image

# project-specific helpers (TestOptions, CreateSSLModel, CreateTrgDataSSLLoader)
# come from the original file's remaining imports


def main():
    opt = TestOptions()
    args = opt.initialize()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU

    if not os.path.exists(args.save):
        os.makedirs(args.save)

    # load the three snapshot models to ensemble
    args.restore_from = args.restore_opt1
    model1 = CreateSSLModel(args)
    model1.eval()
    model1.cuda()

    args.restore_from = args.restore_opt2
    model2 = CreateSSLModel(args)
    model2.eval()
    model2.cuda()

    args.restore_from = args.restore_opt3
    model3 = CreateSSLModel(args)
    model3.eval()
    model3.cuda()

    targetloader = CreateTrgDataSSLLoader(args)

    # change the mean for a different dataset
    IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
    IMG_MEAN = torch.reshape(torch.from_numpy(IMG_MEAN), (1, 3, 1, 1))
    mean_img = torch.zeros(1, 1)

    predicted_label = np.zeros((len(targetloader), 512, 1024))
    predicted_prob = np.zeros((len(targetloader), 512, 1024))
    image_name = []

    with torch.no_grad():
        for index, batch in enumerate(targetloader):
            if index % 100 == 0:
                print('%d processed' % index)
            image, _, name = batch
            if mean_img.shape[-1] < 2:
                B, C, H, W = image.shape
                mean_img = IMG_MEAN.repeat(B, 1, H, W)
            image = image.clone() - mean_img
            image = Variable(image).cuda()

            # forward pass through the ensemble, averaging the softmax outputs
            output1 = nn.functional.softmax(model1(image), dim=1)
            output2 = nn.functional.softmax(model2(image), dim=1)
            output3 = nn.functional.softmax(model3(image), dim=1)
            a, b = 0.3333, 0.3333
            output = a * output1 + b * output2 + (1.0 - a - b) * output3
            output = nn.functional.interpolate(
                output, (512, 1024), mode='bilinear',
                align_corners=True).cpu().data[0].numpy()
            output = output.transpose(1, 2, 0)
            label, prob = np.argmax(output, axis=2), np.max(output, axis=2)
            predicted_label[index] = label.copy()
            predicted_prob[index] = prob.copy()
            image_name.append(name[0])

    # per-class confidence thresholds
    thres = []
    for i in range(19):
        x = predicted_prob[predicted_label == i]
        if len(x) == 0:
            thres.append(0)
            continue
        x = np.sort(x)
        # np.int is deprecated; plain int behaves identically here
        thres.append(x[int(np.round(len(x) * 0.66))])  # the paper takes the top 66% or 0.9 as the threshold
    print(thres)
    thres = np.array(thres)
    thres[thres > 0.9] = 0.9
    print(thres)

    # void out low-confidence pixels and save the pseudo-labels
    for index in range(len(targetloader)):
        name = image_name[index]
        label = predicted_label[index]
        prob = predicted_prob[index]
        for i in range(19):
            label[(prob < thres[i]) * (label == i)] = 255
        output = np.asarray(label, dtype=np.uint8)
        output = Image.fromarray(output)
        name = name.split('/')[-1]
        output.save('%s/%s' % (args.save, name))
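# The thresholding above (66th-percentile confidence per class, capped at 0.9)
# can be isolated into a helper. A minimal sketch with hypothetical names; it
# adds a bounds guard for the edge case where round(len(x) * 0.66) == len(x),
# which the inline version does not handle:
import numpy as np

def classwise_thresholds(predicted_prob, predicted_label,
                         num_classes=19, quantile=0.66, cap=0.9):
    """Per-class confidence cutoff for pseudo-label selection."""
    thres = np.zeros(num_classes)
    for i in range(num_classes):
        x = np.sort(predicted_prob[predicted_label == i])
        if len(x) == 0:
            continue
        idx = min(int(round(len(x) * quantile)), len(x) - 1)  # guard the upper edge
        thres[i] = min(x[idx], cap)
    return thres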
    load_split_nii(nii, split_outfolder, 1, 0)
    num = len(glob.glob(split_outfolder + '\\*.png'))

    results_dir = '.\\results\\' + name
    dataroot = split_outfolder
    args = [
        '--dataroot', dataroot,
        '--gpu_ids', '-1',
        '--name', 'ct_color',
        '--no_dropout',
        '--preprocess', 'none',
        '--num_test', str(num),
        '--results_dir', results_dir
    ]
    bkp_argv = sys.argv
    for arg in args:
        sys.argv.append(arg)

    opt = TestOptions().parse()  # get test options
    # hard-code some parameters for test
    opt.num_threads = 0        # test code only supports num_threads = 0
    opt.batch_size = 1         # test code only supports batch_size = 1
    opt.serial_batches = True  # disable data shuffling; comment this line out for results on randomly chosen images
    opt.no_flip = True         # no flip; comment this line out for results on flipped images
    opt.display_id = -1        # no visdom display; the test code saves the results to an HTML file

    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.setup(opt)               # regular setup: load and print networks; create schedulers
    if opt.eval:
        model.eval()
""" usage: python3 generate.py --image_path ./apple_test.jpg --name apple2orange --model cycle_gan --gpu_ids -1 gpu_ids: -1 for cpu inference """ from options.test_options import TestOptions opt = TestOptions().parse() from models.one_direction_test_model import OneDirectionTestModel from data.unaligned_data_loader import load_image_for_prediction import sys import cv2 import os import numpy as np from PIL import Image opt.nThreads = 1 opt.batchSize = 1 opt.serial_batches = True def generate(): """ generate single image specific by image path, and show the after generated image :return: """ image_path = opt.image_path print('generate from {}'.format(image_path)) data = load_image_for_prediction(opt, image_path)
import time
import os

import numpy as np

from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from pdb import set_trace as st
from util import html

opt = TestOptions().parse()  # set CUDA_VISIBLE_DEVICES before import torch
opt.nThreads = 1           # test code only supports nThreads = 1
opt.batchSize = 1          # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.stack = True
opt.use_dropout = False
opt.use_dropout1 = False

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
visualizer = Visualizer(opt)

# evaluate every epoch up to 25, every 2nd epoch from 26 to 100, then every 20th
epoch_list = (list(range(26)) + list(np.arange(26, 101, 2)) +
              list(np.arange(101, int(opt.which_epoch1) + 1, 20)))
for epoch in epoch_list:
    opt.which_epoch1 = epoch
    model = create_model(opt)
    # create website
    web_dir = os.path.join(opt.results_dir, opt.name,
                           '%s_%s' % (opt.phase, opt.which_epoch + '+' + str(epoch)))
    webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
                        (opt.name, opt.phase, opt.which_epoch + '+' + str(epoch)))
    # test
    for i, data in enumerate(dataset):
import shutil
import os
from pathlib import Path

# TestOptions, create_dataset, create_model, and html are imported earlier
# in the original file


def delete_ipynb_checkpoints():
    """Delete all .ipynb_checkpoints directories under the working directory."""
    for filename in Path(os.getcwd()).glob('**/*.ipynb_checkpoints'):
        try:
            shutil.rmtree(filename)
        except OSError as e:
            print(e)
        else:
            print("The %s is deleted successfully" % filename)


delete_ipynb_checkpoints()

# options
opt = TestOptions().parse()
opt.num_threads = 1        # test code only supports num_threads = 1
opt.batch_size = 1         # test code only supports batch_size = 1
opt.serial_batches = True  # no shuffle

# create dataset
dataset = create_dataset(opt)
model = create_model(opt)
model.setup(opt)
model.eval()
print('Loading model %s' % opt.model)

# create website
web_dir = os.path.join(opt.results_dir, opt.phase + '_sync' if opt.sync else opt.phase)
webpage = html.HTML(web_dir, 'Training = %s, Phase = %s, Class = %s' % (opt.name, opt.phase, opt.name))
import time
import os

from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from pdb import set_trace as st
from util import html
from psnr import test_psnr

opt = TestOptions().parse()
opt.nThreads = 1           # test code only supports nThreads = 1
opt.batchSize = 1          # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True         # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)

# create website
web_dir = os.path.join("./ablation/", opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
                    (opt.name, opt.phase, opt.which_epoch))

# test
print(len(dataset))
for i, data in enumerate(dataset):
    model.set_input(data)
    visuals = model.predict()
from options.test_options import TestOptions  # assumed location, matching the other test scripts
from data import create_dataset
from models import create_model
from util.visualizer import Visualizer
from util.visualizer import save_segment_result
from util.metrics import RunningScore
from util import util
import time
import os
import numpy as np
import torch.nn as nn

best_result = 0

if __name__ == '__main__':
    # validation settings
    opt_val = TestOptions().parse()

    # where the displayed validation results are stored
    web_dir = os.path.join(opt_val.checkpoints_dir, opt_val.name, 'val')
    image_dir = os.path.join(web_dir, 'images')
    util.mkdirs([web_dir, image_dir])

    # set up the validation dataset
    dataset_val = create_dataset(opt_val)
    dataset_val_size = len(dataset_val)
    print('The number of validation images = %d' % dataset_val_size)

    # create the validation model
    model_val = create_model(opt_val)
    model_val.eval()
import time
import os

from options.test_options import TestOptions
from data.data_loader import DataLoader
from models.combogan_model import ComboGANModel
from util.visualizer import Visualizer
from util import html

opt = TestOptions().parse()
opt.nThreads = 1    # test code only supports nThreads = 1
opt.batchSize = 1   # test code only supports batchSize = 1
opt.no_flip = True  # no flip

dataset = DataLoader(opt)
model = ComboGANModel(opt)
visualizer = Visualizer(opt)

# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%d' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %d' %
                    (opt.name, opt.phase, opt.which_epoch))

# store images for matrix visualization
vis_buffer = []

# test
for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    model.set_input(data)
    model.test()
    visuals = model.get_current_visuals(testing=True)
    img_path = model.get_image_paths()
def run():
    test_opts = TestOptions().parse()

    out_path_results = os.path.join(test_opts.exp_dir, 'inference_results')
    os.makedirs(out_path_results, exist_ok=True)

    # update test options with the options used during training
    ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
    opts = ckpt['opts']
    opts.update(vars(test_opts))
    opts = Namespace(**opts)

    if opts.encoder_type in ENCODER_TYPES['pSp']:
        net = pSp(opts)
    else:
        net = e4e(opts)
    net.eval()
    net.cuda()

    print('Loading dataset for {}'.format(opts.dataset_type))
    dataset_args = data_configs.DATASETS[opts.dataset_type]
    transforms_dict = dataset_args['transforms'](opts).get_transforms()
    dataset = InferenceDataset(root=opts.data_path,
                               transform=transforms_dict['transform_inference'],
                               opts=opts)
    dataloader = DataLoader(dataset,
                            batch_size=opts.test_batch_size,
                            shuffle=False,
                            num_workers=int(opts.test_workers),
                            drop_last=False)

    if opts.n_images is None:
        opts.n_images = len(dataset)

    # get the image corresponding to the latent average
    avg_image = net(net.latent_avg.unsqueeze(0),
                    input_code=True,
                    randomize_noise=False,
                    return_latents=False,
                    average_code=True)[0]
    avg_image = avg_image.to('cuda').float().detach()
    if opts.dataset_type == "cars_encode":
        avg_image = avg_image[:, 32:224, :]
    tensor2im(avg_image).save(os.path.join(opts.exp_dir, 'avg_image.jpg'))

    if opts.dataset_type == "cars_encode":
        resize_amount = (256, 192) if opts.resize_outputs else (512, 384)
    else:
        resize_amount = (256, 256) if opts.resize_outputs else (opts.output_size, opts.output_size)

    global_i = 0
    global_time = []
    all_latents = {}
    for input_batch in tqdm(dataloader):
        if global_i >= opts.n_images:
            break
        with torch.no_grad():
            input_cuda = input_batch.cuda().float()
            tic = time.time()
            result_batch, result_latents = run_on_batch(input_cuda, net, opts, avg_image)
            toc = time.time()
            global_time.append(toc - tic)

        for i in range(input_batch.shape[0]):
            results = [tensor2im(result_batch[i][iter_idx])
                       for iter_idx in range(opts.n_iters_per_batch)]
            im_path = dataset.paths[global_i]

            # save step-by-step results side-by-side
            for idx, result in enumerate(results):
                save_dir = os.path.join(out_path_results, str(idx))
                os.makedirs(save_dir, exist_ok=True)
                result.resize(resize_amount).save(os.path.join(save_dir, os.path.basename(im_path)))

            # store all latents as dict pairs (image_name, latents)
            all_latents[os.path.basename(im_path)] = result_latents[i]
            global_i += 1

    stats_path = os.path.join(opts.exp_dir, 'stats.txt')
    result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time))
    print(result_str)
    with open(stats_path, 'w') as f:
        f.write(result_str)

    # save all latents as an npy file
    np.save(os.path.join(test_opts.exp_dir, 'latents.npy'), all_latents)
import time
import os

from options.test_options import TestOptions
from data.data_loader import CreateDataLoader
from models.models import create_model
from util.visualizer import Visualizer
from util import html

opt = TestOptions().parse()
opt.nThreads = 1           # test code only supports nThreads = 1
opt.batchSize = 1          # test code only supports batchSize = 1
opt.serial_batches = True  # no shuffle
opt.no_flip = True         # no flip

data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
model = create_model(opt)
visualizer = Visualizer(opt)

# create website
web_dir = os.path.join(opt.results_dir, opt.name, '%s_%s' % (opt.phase, opt.which_epoch))
webpage = html.HTML(web_dir, 'Experiment = %s, Phase = %s, Epoch = %s' %
                    (opt.name, opt.phase, opt.which_epoch))

# test
for i, data in enumerate(dataset):
    if i >= opt.how_many:
        break
    model.set_input(data)
    model.test()
    visuals = model.get_current_visuals()
    img_path = model.get_image_paths()
    print('process image... %s' % img_path)
    visualizer.save_images(webpage, visuals, img_path)
import os
import time

import numpy as np
import torch
from PIL import Image
from tqdm import tqdm

from options.test_options import TestOptions
from data import create_dataset
from models import create_model
from utils import utils

if __name__ == '__main__':
    opt = TestOptions()
    opt = opt.parse()          # get test options
    opt.num_threads = 0        # test code only supports num_threads = 0
    opt.serial_batches = True  # disable data shuffling; comment this line out for results on randomly chosen images
    opt.no_flip = True

    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    model = create_model(opt)      # create a model given opt.model and other options
    model.load_pretrain_models()
    netP = model.netP
    model.eval()

    for i, data in tqdm(enumerate(dataset), total=len(dataset) // opt.batch_size):
        inp = data['LR']
        with torch.no_grad():
            parse_map, _ = netP(inp)