def __init__(self, args):
    """Build the concatenated train loader and one test loader per test set."""
    self.loader_train = None
    if not args.test_only:
        train_sets = []
        for name in args.data_train:
            # de_NTIRE2019 is served by the NTIRE2019 module; others map 1:1.
            module_name = 'NTIRE2019' if 'de_NTIRE2019' in name else name
            module = import_module('data.' + module_name.lower())
            train_sets.append(getattr(module, module_name)(args, name=name))
        self.loader_train = MSDataLoader(
            args,
            MyConcatDataset(train_sets),
            batch_size=args.batch_size,
            shuffle=True,
            pin_memory=not args.cpu)

    benchmark_names = [
        'Set5', 'Set14', 'B100', 'Urban100',
        'Manga109', 'NTIRE2019', 'de_NTIRE2019',
    ]
    self.loader_test = []
    for name in args.data_test:
        if name in benchmark_names:
            module = import_module('data.benchmark')
            testset = getattr(module, 'Benchmark')(args, train=False, name=name)
        else:
            # DIV2K-Q* (JPEG-quality) sets are handled by the DIV2KJPEG module.
            module_name = 'DIV2KJPEG' if 'DIV2K-Q' in name else name
            module = import_module('data.' + module_name.lower())
            testset = getattr(module, module_name)(args, train=False, name=name)
        self.loader_test.append(
            MSDataLoader(
                args, testset,
                batch_size=1,
                shuffle=False,
                pin_memory=not args.cpu))
def get_loader(self):
    """Create and return (loader_train, loader_test).

    loader_train is None when self.args.test_only is set; loader_test is
    always built. Module names are taken verbatim from args.data_train /
    args.data_test (note: NOT lowercased in this variant).
    """
    self.module_train = import_module('data.' + self.args.data_train)
    self.module_test = import_module('data.' + self.args.data_test)

    # collate_fn was assigned identically in both branches of the original
    # if/else; only pin_memory actually depends on CUDA availability.
    kwargs = {
        'collate_fn': default_collate,
        'pin_memory': not self.args.no_cuda,
    }

    loader_train = None
    if not self.args.test_only:
        trainset = getattr(self.module_train, self.args.data_train)(self.args)
        loader_train = MSDataLoader(
            self.args,
            trainset,
            batch_size=self.args.batch_size,
            shuffle=True,
            **kwargs)

    testset = getattr(self.module_test, self.args.data_test)(
        self.args, train=False)
    loader_test = MSDataLoader(
        self.args,
        testset,
        batch_size=1,
        shuffle=False,
        **kwargs)

    return loader_train, loader_test
def __init__(self, args):
    """Set up self.loader_train (None in test-only mode) and self.loader_test."""
    self.loader_train = None
    if not args.test_only:
        # args.data_train names both the module (lowercased) and the class.
        train_module = import_module('data.' + args.data_train.lower())
        train_cls = getattr(train_module, args.data_train)
        self.loader_train = MSDataLoader(
            args,
            train_cls(args),
            batch_size=args.batch_size,
            shuffle=True,
            collate_fn=default_collate,
            pin_memory=not args.cpu)

    if args.data_test in ['Set5', 'Set14', 'B100', 'Manga109', 'Urban100']:
        # Standard SR benchmarks all share the generic Benchmark dataset.
        test_module = import_module('data.benchmark')
        testset = getattr(test_module, 'Benchmark')(
            args, name=args.data_test, train=False)
    else:
        test_module = import_module('data.' + args.data_test.lower())
        testset = getattr(test_module, args.data_test)(args, train=False)

    self.loader_test = MSDataLoader(
        args,
        testset,
        batch_size=1,
        shuffle=False,
        collate_fn=default_collate,
        pin_memory=not args.cpu)
def __init__(self, args):
    """Prepare the training loader (unless test-only) and the test loader."""
    self.loader_train = None
    if not args.test_only:
        mod = import_module('data.' + args.data_train.lower())
        dataset = getattr(mod, args.data_train)(args)
        self.loader_train = MSDataLoader(
            args, dataset,
            batch_size=args.batch_size,
            shuffle=True,
            pin_memory=not args.cpu)

    is_benchmark = args.data_test in ['Set5', 'Set14', 'DPED', 'Urban100']
    if is_benchmark:
        mod = import_module('data.benchmark')
        testset = getattr(mod, 'Benchmark')(args, train=False)
    else:
        mod = import_module('data.' + args.data_test.lower())
        testset = getattr(mod, args.data_test)(args, train=False)

    self.loader_test = MSDataLoader(
        args, testset,
        batch_size=1,
        shuffle=False,
        pin_memory=not args.cpu)
def __init__(self, args):
    """Build self.loader_train (None in test-only mode) and self.loader_test."""
    # The original set collate_fn identically in both branches of the
    # cpu check; only pin_memory actually depends on args.cpu.
    kwargs = {
        "collate_fn": default_collate,
        "pin_memory": not args.cpu,
    }

    self.loader_train = None
    if not args.test_only:
        module_train = import_module("data." + args.data_train.lower())
        trainset = getattr(module_train, args.data_train)(args)
        self.loader_train = MSDataLoader(
            args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs
        )

    if args.data_test in ["dehaze_test"]:
        module_test = import_module("data.benchmark")
        testset = getattr(module_test, "Benchmark")(args, train=False)
    else:
        module_test = import_module("data." + args.data_test.lower())
        testset = getattr(module_test, args.data_test)(args, train=False)

    self.loader_test = MSDataLoader(
        args, testset, batch_size=1, shuffle=False, **kwargs
    )
def __init__(self, args, model):
    """Build train/test loaders; 'rrl' datasets wrap another dataset with RRL.

    The model argument is forwarded only to the RRL dataset wrapper.
    """
    # collate_fn was assigned identically in both branches of the original
    # cpu check; only pin_memory actually depends on args.cpu.
    kwargs = {
        'collate_fn': default_collate,
        'pin_memory': not args.cpu,
    }

    self.loader_train = None
    if not args.test_only:
        if args.data_train.lower() != 'rrl':
            module_train = import_module('data.' + args.data_train.lower())
            trainset = getattr(module_train, args.data_train)(args)
        else:
            # RRL wraps the dataset class named by args.rrl_data.
            module_train = import_module('data.' + args.rrl_data.lower())
            trainclass = getattr(module_train, args.rrl_data)
            module_train = import_module('data.rrl')
            trainset = getattr(module_train, 'RRL')(trainclass, args, model)
        self.loader_train = MSDataLoader(
            args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs)

    if args.data_test in ['Set5', 'Set14', 'B100', 'Urban100']:
        if not args.benchmark_noise:
            module_test = import_module('data.benchmark')
            testset = getattr(module_test, 'Benchmark')(args, train=False)
        else:
            module_test = import_module('data.benchmark_noise')
            testset = getattr(module_test, 'BenchmarkNoise')(args, train=False)
    else:
        if args.data_test.lower() != 'rrl':
            module_test = import_module('data.' + args.data_test.lower())
            testset = getattr(module_test, args.data_test)(args, train=False)
        else:
            module_test = import_module('data.' + args.rrl_data.lower())
            testclass = getattr(module_test, args.rrl_data)
            module_test = import_module('data.rrl')
            testset = getattr(module_test, 'RRL')(testclass, args, model, False)

    self.loader_test = MSDataLoader(
        args, testset, batch_size=1, shuffle=False, **kwargs)
def __init__(self, args):
    """Build self.loader_train (None in test-only mode) and self.loader_test."""
    # collate_fn was identical in both branches of the original cpu check;
    # only pin_memory actually depends on args.cpu.
    kwargs = {
        'collate_fn': default_collate,
        'pin_memory': not args.cpu,
    }

    self.loader_train = None
    if not args.test_only:
        module_train = import_module('data.' + args.data_train.lower())
        trainset = getattr(module_train, args.data_train)(args)
        self.loader_train = MSDataLoader(
            args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs
        )

    # BUG FIX: a leftover debug line previously forced args.data_test = 'Set5'
    # (plus stray debug prints), silently ignoring the configured test set.
    if args.data_test in ['Set5', 'Set14', 'B100', 'Urban100']:
        if not args.benchmark_noise:
            module_test = import_module('data.benchmark')
            testset = getattr(module_test, 'Benchmark')(args, train=False)
        else:
            module_test = import_module('data.benchmark_noise')
            testset = getattr(module_test, 'BenchmarkNoise')(
                args, train=False
            )
    else:
        module_test = import_module('data.' + args.data_test.lower())
        testset = getattr(module_test, args.data_test)(args, train=False)

    self.loader_test = MSDataLoader(
        args, testset, batch_size=1, shuffle=False, **kwargs
    )
def __init__(self, args):
    """Build a concatenated train loader plus one test loader per test set."""
    self.loader_train = None
    if not args.test_only:
        train_sets = []
        for name in args.data_train:
            # Dataset names starting with '300W' live in the W300 module.
            module_name = "W300" if name.find('300W') == 0 else name
            module = import_module('data.' + module_name.lower())
            train_sets.append(getattr(module, module_name)(args, name=name))
        self.loader_train = MSDataLoader(
            args,
            MyConcatDataset(train_sets),
            batch_size=args.batch_size,
            shuffle=True,
            pin_memory=not args.cpu)

    self.loader_test = []
    for name in args.data_test:
        print(name)
        if name.find('MultiPIE') == 0:
            # 'MultiPIE_<pose>_<pose>...' encodes the poses to evaluate on.
            parts = name.split('_')
            module_name = parts[0]
            test_pose = [int(p) for p in parts[1:]]
            module = import_module('data.' + module_name.lower())
            testset = getattr(module, module_name)(
                args, train=False, name=name, testPose=test_pose)
        else:
            module_name = "W300" if name.find('300W') == 0 else name
            module = import_module('data.' + module_name.lower())
            testset = getattr(module, module_name)(args, train=False, name=name)
        self.loader_test.append(
            MSDataLoader(
                args, testset,
                batch_size=1,
                shuffle=False,
                pin_memory=not args.cpu))
def __init__(self, args):
    """Build self.loader_train (None in test-only mode) and self.loader_test."""
    # collate_fn stacks batch_size samples into one batch tensor; pinned
    # (page-locked) memory only helps when a GPU is used. The original set
    # collate_fn identically in both branches, so the if/else is collapsed.
    kwargs = {
        'collate_fn': default_collate,
        'pin_memory': not args.cpu,
    }

    self.loader_train = None
    if not args.test_only:
        # e.g. args.data_train == 'DIV2K' -> module data.div2k, class DIV2K
        module_train = import_module('data.' + args.data_train.lower())
        trainset = getattr(module_train, args.data_train)(args)
        # Splits trainset into batches of size args.batch_size.
        self.loader_train = MSDataLoader(
            args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs)

    if args.data_test in ['Set5', 'Set14', 'B100', 'Urban100']:
        if not args.benchmark_noise:
            module_test = import_module('data.benchmark')
            # Benchmark(args, train=False) gathers the HR/LR test file lists.
            testset = getattr(module_test, 'Benchmark')(args, train=False)
        else:
            module_test = import_module('data.benchmark_noise')
            testset = getattr(module_test, 'BenchmarkNoise')(args, train=False)
    else:
        # Non-benchmark sets (e.g. DIV2K) use their own dataset module.
        module_test = import_module('data.' + args.data_test.lower())
        testset = getattr(module_test, args.data_test)(args, train=False)

    self.loader_test = MSDataLoader(
        args, testset, batch_size=1, shuffle=False, **kwargs)
def __init__(self, args):
    """Build self.loader_train (None in test-only mode) and self.loader_test."""
    print('Initializing Data Class...')
    # collate_fn was identical in both branches of the original cpu check;
    # only pin_memory actually depends on args.cpu.
    kwargs = {
        'collate_fn': default_collate,
        'pin_memory': not args.cpu,
    }

    self.loader_train = None
    if not args.test_only:
        print('Importing data module...')
        module_train = import_module('data.' + args.data_train.lower())
        print('Data module ' + args.data_train.lower() + ' imported.')
        trainset = getattr(module_train, args.data_train)(args)
        print('Importing data module... : trainset acquired')
        self.loader_train = MSDataLoader(
            args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs)

    print('dataset: ' + args.data_test)
    if args.data_test in ['Set5', 'Set14', 'B100', 'Urban100']:
        if not args.benchmark_noise:
            module_test = import_module('data.benchmark')
            testset = getattr(module_test, 'Benchmark')(args, train=False)
        else:
            module_test = import_module('data.benchmark_noise')
            testset = getattr(module_test, 'BenchmarkNoise')(args, train=False)
    else:
        print('Importing data module...')
        module_test = import_module('data.' + args.data_test.lower())
        # BUG FIX: this message previously reported args.data_train.lower(),
        # logging the wrong module name when train and test sets differ.
        print('Data module ' + args.data_test.lower() + ' imported.')
        testset = getattr(module_test, args.data_test)(args, train=False)
        print('Importing data module... : testset acquired')

    self.loader_test = MSDataLoader(
        args, testset, batch_size=1, shuffle=False, **kwargs)
def __init__(self, args):
    """Create the (optional) concatenated train loader and per-set test loaders."""
    def resolve(dataset):
        # '300W*' datasets live in the W300 module; 'LS3DW*' datasets use
        # the 'LS3D-W-balanced' dataset name.
        module_name = "W300" if dataset.find('300W') == 0 else dataset
        name = "LS3D-W-balanced" if dataset.find('LS3DW') == 0 else dataset
        return module_name, name

    self.loader_train = None
    if not args.test_only:
        train_sets = []
        for dataset in args.data_train:
            module_name, name = resolve(dataset)
            module = import_module('data.' + module_name.lower())
            train_sets.append(getattr(module, module_name)(args, name=name))
        self.loader_train = MSDataLoader(
            args,
            MyConcatDataset(train_sets),
            batch_size=args.batch_size,
            shuffle=True,
            pin_memory=not args.cpu)

    self.loader_test = []
    for dataset in args.data_test:
        module_name, name = resolve(dataset)
        module = import_module('data.' + module_name.lower())
        testset = getattr(module, module_name)(args, train=False, name=name)
        self.loader_test.append(
            MSDataLoader(
                args, testset,
                batch_size=1,
                shuffle=False,
                pin_memory=not args.cpu))
def __init__(self, args):
    """Build a concatenated train loader and (unless disabled) per-set test loaders."""
    self.loader_train = None
    if not args.test_only:
        # Several train sets may be given; their dataset objects are
        # concatenated into one training dataset.
        train_sets = []
        for name in args.data_train:
            # 'DIV2K-Q*' (JPEG-compressed) sets are served by DIV2KJPEG.
            module_name = 'DIV2KJPEG' if 'DIV2K-Q' in name else name
            module = import_module('data.' + module_name.lower())
            train_sets.append(getattr(module, module_name)(args, name=name))
        self.loader_train = MSDataLoader(
            args,
            MyConcatDataset(train_sets),
            batch_size=args.batch_size,
            shuffle=True,
            pin_memory=not args.cpu)

    self.loader_test = []
    if not args.not_test:
        # Likewise, multiple test sets may be requested.
        for name in args.data_test:
            if name in ['Set5', 'Set14', 'B100', 'Urban100']:
                module = import_module('data.benchmark')
                testset = getattr(module, 'Benchmark')(
                    args, train=False, name=name)
            else:
                module_name = 'DIV2KJPEG' if 'DIV2K-Q' in name else name
                module = import_module('data.' + module_name.lower())
                print(module_name)
                testset = getattr(module, module_name)(
                    args, train=False, name=name)
            self.loader_test.append(
                MSDataLoader(
                    args, testset,
                    batch_size=1,
                    shuffle=False,
                    pin_memory=not args.cpu))
def get_loader(self, args):
    """Return (loader_train, loader_test); loader_train is None in test-only mode."""
    module_train = import_module("data." + args.data_train.lower())
    if args.data_test in ["Set5", "Set14", "B100", "Urban100"]:
        module_test = import_module("data.benchmark")
        benchmark = True
    else:
        module_test = import_module("data." + args.data_test.lower())
        benchmark = False

    # collate_fn was identical in both branches of the original no_cuda
    # check; only pin_memory actually depends on CUDA availability.
    kwargs = {
        "collate_fn": default_collate,
        "pin_memory": not args.no_cuda,
    }

    loader_train = None
    if not args.test_only:
        trainset = getattr(module_train, args.data_train)(args)
        loader_train = MSDataLoader(
            args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs)

    if benchmark:
        testset = getattr(module_test, "Benchmark")(args, train=False)
    else:
        testset = getattr(module_test, args.data_test)(args, train=False)

    loader_test = MSDataLoader(
        args, testset, batch_size=1, shuffle=False, **kwargs)

    return loader_train, loader_test
def get_loader(self, args):
    """Return (loader_train, loader_test); loader_train is None in test-only mode."""
    module_train = import_module('data.' + args.data_train.lower())
    if args.data_test in ['Set5', 'Set14', 'B100', 'Urban100']:
        module_test = import_module('data.benchmark')
        benchmark = True
    else:
        module_test = import_module('data.' + args.data_test.lower())
        benchmark = False

    # collate_fn was identical in both branches of the original no_cuda
    # check; only pin_memory actually depends on CUDA availability.
    kwargs = {
        'collate_fn': default_collate,
        'pin_memory': not args.no_cuda,
    }

    loader_train = None
    if not args.test_only:
        trainset = getattr(module_train, args.data_train)(args)
        loader_train = MSDataLoader(
            args, trainset, batch_size=args.batch_size, shuffle=True, **kwargs)

    if benchmark:
        testset = getattr(module_test, 'Benchmark')(args, train=False)
    else:
        testset = getattr(module_test, args.data_test)(args, train=False)

    loader_test = MSDataLoader(
        args, testset, batch_size=1, shuffle=False, **kwargs)

    return loader_train, loader_test