def load_ct_dataset(self, dataset, is_train=True, is_registered=False, grayscale_corrected=True):
    r"""Build a DataLoader over the given CT sub-datasets.

    Parameters
    ----------
    dataset : list of str
        Dataset sub-directories relative to ``self.data_dir``,
        e.g. ``['train\GB1', 'train\B1M6']``.
    is_train : bool
        When True, return the training split (shuffled, ``self.batch_size``);
        otherwise the test split (unshuffled, ``self.test_batch_size``).
    is_registered : bool
        Forwarded to the dataset factory as ``registered``.
    grayscale_corrected : bool
        Forwarded to the dataset factory unchanged.

    Returns
    -------
    torch.utils.data.DataLoader
    """
    # Raw docstring above: the original non-raw docstring contained the
    # invalid escape sequences ``\G`` and ``\B`` (SyntaxWarning on modern
    # CPython).  Both branches below differ only in the dataset factory,
    # batch size, and shuffle flag, so the DataLoader construction is
    # shared instead of duplicated.
    if is_train:
        print('Loading train ct datasets...')
        split = utils.get_training_set(
            self.data_dir, dataset, self.crop_size, self.scale_factor,
            registered=is_registered, grayscale_corrected=grayscale_corrected)
        batch_size, shuffle = self.batch_size, True
    else:
        print('Loading test ct datasets...')
        split = utils.get_test_set(
            self.data_dir, dataset, self.crop_size, self.scale_factor,
            registered=is_registered, grayscale_corrected=grayscale_corrected)
        batch_size, shuffle = self.test_batch_size, False
    return DataLoader(dataset=split, num_workers=self.num_threads,
                      batch_size=batch_size, shuffle=shuffle)
# NOTE(review): this chunk splices together three different scopes — the
# tail of a slider-style ``set_val`` method (its ``def`` line is outside
# this view), a small helper method, and module-level evaluation-script
# setup.  Indentation below is reconstructed; confirm against the
# original file.

        # --- tail of an enclosing set_val-style method ---
        if self.drawon:
            # Redraw the widget's canvas so the new value is visible.
            self.ax.figure.canvas.draw()
        self.val = val
        if not self.eventson:
            # Event dispatch disabled: store the value but notify nobody.
            return
        for cid, func in self.observers.items():
            # NOTE(review): ``discrete_val`` is not defined anywhere in the
            # visible code — presumably computed earlier in this method;
            # confirm it is not meant to be ``val``.
            func(discrete_val)

    def update_val_external(self, val, max_val):
        # Thin wrapper so external callers can drive the widget's value.
        # NOTE(review): passes two arguments to set_val — the visible
        # fragment does not show set_val's signature; confirm it accepts
        # (val, max_val).
        self.set_val(val, max_val)

# --- module-level evaluation-script setup ---
net = get_net()
net.eval()
data_set = get_test_set()
# NOTE(review): batch_size=len(data_set) loads the whole test set as one
# batch, and shuffle=True on a *test* loader is unusual — confirm both
# are intentional.
tsDataloader = DataLoader(data_set, batch_size=len(data_set), shuffle=True, num_workers=8, collate_fn=data_set.collate_fn)

# Number of predicted time steps = prediction horizon / step size.
len_pred = int(args.time_pred / args.dt)
# Per-timestep loss accumulators (summed loss and sample counts).
lossVals = torch.zeros(len_pred).to(args.device)
counts = torch.zeros(len_pred).to(args.device)

# NOTE(review): meanings of these bounds are not shown here — presumably
# a spatial window/tolerance for the evaluation; confirm units.
delta = 0.3
x_min = -100
x_max = 100
y_min = -15
# Evaluation script for a SegNet-style autoencoder.  Uses the deprecated
# ``tf.app.flags`` API, so this targets TensorFlow 1.x.
from inputs import inputs
from models import SegNetAutoencoder
from scalar_ops import accuracy, loss
import classifier
import config
import tensorflow as tf
import utils

# Resolve the test split (images + labels) for the configured dataset.
test_file, test_labels_file = utils.get_test_set(config.working_dataset, include_labels=True)

# Command-line flags; the test/test_labels defaults come from the lookup above.
tf.app.flags.DEFINE_string('ckpt_dir', './ckpts', 'Train checkpoint directory')
tf.app.flags.DEFINE_string('test', test_file, 'Test data')
tf.app.flags.DEFINE_string('test_labels', test_labels_file, 'Test labels data')
tf.app.flags.DEFINE_string('test_logs', './logs/test', 'Log directory')
tf.app.flags.DEFINE_boolean('strided', True, 'Use strided convolutions and deconvolutions')
tf.app.flags.DEFINE_integer('batch', 200, 'Batch size')

FLAGS = tf.app.flags.FLAGS

def test():
    """Build the evaluation graph: input pipeline, model, accuracy op.

    NOTE(review): the definition is truncated at the end of this chunk —
    session setup / checkpoint restore presumably follow.
    """
    images, labels = inputs(FLAGS.batch, FLAGS.test, FLAGS.test_labels)
    tf.summary.image('labels', labels)
    one_hot_labels = classifier.one_hot(labels)
    # 4 presumably is the number of output classes — confirm against models.py.
    autoencoder = SegNetAutoencoder(4, strided=FLAGS.strided)
    logits = autoencoder.inference(images)
    accuracy_op = accuracy(logits, one_hot_labels, FLAGS.batch)
# Trajectory-prediction evaluation script (script entry point; the loop
# body at the bottom is truncated at the end of this chunk).
import torch
from torch.utils.data import DataLoader
import numpy as np
from losses import maskedNLL, maskedMSE
from utils import Settings, get_net, get_test_set

if __name__ == '__main__':
    args = Settings()
    net = get_net()
    testSet = get_test_set()
    # Deterministic evaluation order: no shuffling on the test loader.
    testDataloader = DataLoader(testSet, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers, collate_fn=testSet.collate_fn)
    # NOTE(review): presumably switches the model to inference-style
    # behavior (custom flag rather than net.eval()) — confirm in the
    # model definition.
    net.train_flag = False

    it_testDataloader = iter(testDataloader)
    len_test = len(it_testDataloader)

    avg_loss = 0
    # Per-batch accumulators: history, ground-truth future, predictions,
    # maneuver probabilities, and masks (names suggest this — confirm
    # against how the loop body fills them).
    hist_test = []
    fut_test = []
    pred_test = []
    proba_man_test = []
    mask_test = []
    # NOTE(review): loop body continues past this chunk.
    for j in range(len_test):
# Fragment of an argparse-driven training script: ``parser``, ``torch``,
# ``DataLoader``, ``get_training_dataset``, ``get_test_set`` and ``VGG``
# are all defined/imported outside this view.
parser.add_argument('--gpuids', default=[0, 1, 2, 3], nargs='+', help='GPU ID for using')
parser.add_argument('--cuda', action='store_true', help='use cuda?')

opt = parser.parse_args()
# nargs='+' yields strings from the command line; coerce GPU ids to ints.
opt.gpuids = list(map(int, opt.gpuids))
print(opt)

use_cuda = opt.cuda
if use_cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")

train_set = get_training_dataset(opt.upscale_factor)
test_set = get_test_set(opt.upscale_factor)
# Hard-coded batch sizes (train 128 / test 100) and 2 worker processes.
training_data_loader = DataLoader(dataset=train_set, num_workers=2, batch_size=128, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=2, batch_size=100, shuffle=False)

print("Building model...")
net = VGG('VGG19')
best_acc = 0
# NOTE(review): branch body continues past this chunk.
if use_cuda:
# Fragment of a super-resolution training script.  This chunk begins
# mid-way through a ``parser.add_argument(...)`` call whose opening is
# outside this view.
                    default=4, help='Number of thread for DataLoader')

opt = parser.parse_args()
print(opt)

# NOTE(review): ``isCuda``, ``batchSize`` and ``testBatchSize`` are not
# defined in the visible code — presumably module-level settings declared
# earlier in the file; confirm.
if isCuda and not torch.cuda.is_available():
    raise Exception("No GPU, please change isCuda False")
device = torch.device("cuda" if isCuda else "cpu")

print('===> Loading datasets')
train_set = get_training_set(opt.upscale_factor, opt.datapath)
test_set = get_test_set(opt.upscale_factor, opt.datapath)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=testBatchSize, shuffle=False)
print('===> Datasets Loading Complete')

print('===> Model Initialize')
# NOTE(review): branch body continues past this chunk.
if opt.model == "Upconv":