# sep_data = [p for p in sep_data if 'foggy' in p]
print('loaded {} samples'.format(len(sep_data)))
dataset = ClassImageLoader(paths=sep_data, transform=transform, inf=True)
loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        drop_last=True)
random_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        drop_last=True)

# load model
transfer = Conditional_UNet(num_classes=args.num_classes)
sd = torch.load(args.cp_path)
transfer.load_state_dict(sd['inference'])
transfer.eval()

# wrap the pretrained classifer with a softmax head so it outputs probabilities
classifer = torch.load(args.classifer_path)
classifer = nn.Sequential(classifer, nn.Softmax(dim=1))
classifer.eval()

transfer.cuda()
classifer.cuda()

bs = args.batch_size
# one one-hot condition vector per weather class (rows of the identity matrix)
labels = torch.as_tensor(np.arange(args.num_classes, dtype=np.int64))
onehot = torch.eye(args.num_classes)[labels].to('cuda')
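
# A minimal sketch of how the pieces above are typically wired together
# (assumed; the actual inference loop is not part of this excerpt): translate
# one batch to every weather class and score the results with the softmax
# classifer. Two assumptions: the dataset yields (image, label) pairs, and
# Conditional_UNet is called as transfer(batch, condition).
for batch, _ in loader:
    batch = batch.to('cuda')
    with torch.no_grad():
        for c in range(args.num_classes):
            cond = onehot[c].expand(batch.size(0), -1)  # repeat the class one-hot over the batch
            out = transfer(batch, cond)
            probs = classifer(out)  # per-class confidence for the translated images
    break  # one batch is enough for a smoke test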
def build(self):
    args = self.args

    # Models
    print('Build Models...')
    self.inference = Conditional_UNet(num_classes=self.num_classes)
    self.discriminator = SNDisc(num_classes=self.num_classes)

    # Resume from the newest checkpoint if one exists
    exist_cp = sorted(glob(os.path.join(args.save_dir, args.name, '*')))
    if len(exist_cp) != 0:
        print('Load checkpoint: {}'.format(exist_cp[-1]))
        sd = torch.load(exist_cp[-1])
        self.inference.load_state_dict(sd['inference'])
        self.discriminator.load_state_dict(sd['discriminator'])
        self.epoch = sd['epoch']
        self.global_step = sd['global_step']
        print('Checkpoint loaded successfully!')
    else:
        print('Initialize training status.')
        self.epoch = 0
        self.global_step = 0

    self.estimator = torch.load(args.estimator_path)
    self.estimator.eval()

    # Move models to CUDA
    for model in [self.inference, self.discriminator, self.estimator]:
        model.cuda()

    # Optimizers
    self.g_opt = torch.optim.Adam(
            self.inference.parameters(),
            lr=args.lr,
            betas=(0.0, 0.999),
            weight_decay=args.lr / 20)
    self.d_opt = torch.optim.Adam(
            self.discriminator.parameters(),
            lr=args.lr,
            betas=(0.0, 0.999),
            weight_decay=args.lr / 20)

    # Do these loaders need a sampler as well?
    self.train_loader = torch.utils.data.DataLoader(
            self.train_set,
            batch_size=args.batch_size,
            shuffle=True,
            drop_last=True,
            num_workers=args.num_workers)
    if args.sampler:
        self.random_loader = torch.utils.data.DataLoader(
                self.train_set,
                batch_size=args.batch_size,
                sampler=ImbalancedDatasetSampler(self.train_set),
                drop_last=True,
                num_workers=args.num_workers)
    else:
        self.random_loader = torch.utils.data.DataLoader(
                self.train_set,
                batch_size=args.batch_size,
                shuffle=True,
                drop_last=True,
                num_workers=args.num_workers)

    if not args.image_only:
        self.test_loader = torch.utils.data.DataLoader(
                self.test_set,
                batch_size=args.batch_size,
                shuffle=True,
                drop_last=True,
                num_workers=args.num_workers)
        # Cache two fixed test batches on the GPU for periodic evaluation;
        # next(it) replaces it.next(), which newer PyTorch iterators lack.
        test_data_iter = iter(self.test_loader)
        self.test_random_sample = [
            tuple(d.to('cuda') for d in next(test_data_iter))
            for i in range(2)]
        del test_data_iter, self.test_loader

    self.scalar_dict = {}
    self.image_dict = {}
    # Convex blend of two tensors weighted by the current self.lmda
    self.shift_lmda = lambda a, b: (1. - self.lmda) * a + self.lmda * b
    print('Build has been completed.')
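
# A small, self-contained illustration of what shift_lmda computes: the
# convex combination (1 - lmda) * a + lmda * b, used to blend a source tensor
# toward a reference one. The values below are hypothetical, not from the
# training code.
import torch
lmda = 0.3
shift = lambda a, b: (1. - lmda) * a + lmda * b
a, b = torch.zeros(4), torch.ones(4)
print(shift(a, b))  # tensor([0.3000, 0.3000, 0.3000, 0.3000])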
# Standardize the signal columns before feeding them to the model
df.loc[:, cols] = (df.loc[:, cols].fillna(0) - df_mean) / df_std
del temp

oneyear_dataset = OneYearWeatherSignals(
        args.image_root, df, cols, args.photo_id, transform, args.city_name)
signal_loader = torch.utils.data.DataLoader(
        oneyear_dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        drop_last=True)

# load model
transfer = Conditional_UNet(len(cols))
sd = torch.load(args.cp_path)
transfer.load_state_dict(sd['inference'])
transfer.eval()

classifer = torch.load(args.classifer_path)
classifer.eval()

# if args.gpu > 0:
transfer.cuda()
classifer.cuda()

bs = args.batch_size
# out_li = []
os.makedirs(args.output_dir, exist_ok=True)

for k, data in tqdm(enumerate(signal_loader)):
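    # A minimal sketch of the (truncated) loop body, under two assumptions:
    # the dataset yields (image, signal) pairs and Conditional_UNet is called
    # as transfer(batch, signal). It renders one frame per hourly signal and
    # writes it to args.output_dir; save_image is torchvision.utils.save_image
    # and the file-name pattern is hypothetical.
    batch = data[0].to('cuda')
    b_sig = data[1].to('cuda')
    with torch.no_grad():
        out = transfer(batch, b_sig)
    for j in range(out.size(0)):
        save_image(out[j],
                   os.path.join(args.output_dir,
                                '{:06d}.png'.format(k * bs + j)),
                   normalize=True)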
dataset = FlickrDataLoader(args.image_root, df, cols,
                           transform=transform, class_id=True)
loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
random_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.num_workers)

# load model
transfer = Conditional_UNet(num_classes=len(cols))
sd = torch.load(args.cp_path)
transfer.load_state_dict(sd['inference'])
transfer.eval()

estimator = torch.load(args.estimator_path)
estimator.eval()

transfer.cuda()
estimator.cuda()

bs = args.batch_size
# per-attribute L1 errors, one row per batch
l1_li = np.empty((0, len(cols)))

for i, (data, rnd) in tqdm(enumerate(zip(loader, random_loader)),
                           total=len(df) // bs):
    batch = data[0].to('cuda')
    b_sig = data[1].to('cuda')
    # r_batch = rnd[0].to('cuda')
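    # A minimal sketch of the rest of the loop (assumed; the original body is
    # truncated above): translate the batch toward a randomly drawn signal,
    # re-estimate the signal from the output, and record the per-attribute L1
    # error. The transfer(batch, signal) call signature is an assumption.
    r_sig = rnd[1].to('cuda')
    with torch.no_grad():
        out = transfer(batch, r_sig)
        pred_sig = estimator(out)
    l1 = torch.abs(pred_sig - r_sig).mean(dim=0)  # shape: (len(cols),)
    l1_li = np.vstack([l1_li, l1.cpu().numpy()[None, :]])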