def __init__(self, args, models, loss, loader, ckpt):
    """Set up training state for the primary model, with an optional second
    model/optimizer pair when mutual learning is enabled.

    Resuming (``args.load`` non-empty) restores the optimizer state and
    replays scheduler steps so the learning rate matches the saved epoch.
    """
    self.args = args
    self.ckpt = ckpt
    self.loss = loss
    self.losses = []
    self.lr = 0.

    # Data loaders / datasets supplied by the loader object.
    self.train_loader = loader.train_loader
    self.test_loader = loader.test_loader
    self.query_loader = loader.query_loader
    self.testset = loader.testset
    self.queryset = loader.queryset

    # Primary model with its optimizer and LR scheduler.
    self.model = models[0]
    self.optimizer = utility.make_optimizer(args, self.model)
    self.scheduler = utility.make_scheduler(args, self.optimizer)
    self.device = torch.device('cpu' if args.cpu else 'cuda')

    # Configure second model for mutual learning
    if args.mutual_learning:
        self.model2 = models[1]
        self.optimizer2 = utility.make_optimizer(args, self.model2)

    if args.load != '':
        # Restore optimizer state, then fast-forward the scheduler to the
        # resumed epoch (one step per completed test interval).
        self.optimizer.load_state_dict(
            torch.load(os.path.join(ckpt.dir, 'optimizer.pt')))
        for _ in range(len(ckpt.log) * args.test_every):
            self.scheduler.step()
def __init__(self, args, model, loss, loader, ckpt):
    """Initialize trainer state: data loaders, model, loss, optimizer,
    LR scheduler, and optional multi-GPU dataset distribution.

    Resuming (``args.load`` non-empty) restores optimizer state and
    replays scheduler steps to match the saved epoch.
    """
    self.args = args
    self.train_loader = loader.train_loader
    # Batches per epoch: each batch holds `batchid` identities x `batchimage` images.
    self.train_batch_num = len(loader.trainset) // (args.batchid * args.batchimage)
    self.test_loader = loader.test_loader
    self.query_loader = loader.query_loader
    self.testset = loader.testset
    self.queryset = loader.queryset
    self.ckpt = ckpt
    self.model = model
    self.loss = loss
    self.lr = 0.
    self.optimizer = utility.make_optimizer(args, self.model)
    self.scheduler = utility.make_scheduler(args, self.optimizer)
    if args.nGPU > 1:
        # Distribute the training dataset across GPUs with TF MirroredStrategy.
        self.mirrored_strategy = tf.distribute.MirroredStrategy()
        self.distributed_train_loader = self.mirrored_strategy.experimental_distribute_dataset(self.train_loader)
    if args.load != '':
        # NOTE(review): load_state_dict conventionally expects a state dict,
        # but this passes a full Keras model loaded from 'optimizer.h5' —
        # confirm the optimizer save/restore round-trip actually works.
        self.optimizer.load_state_dict(
            # torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
            keras.models.load_model(os.path.join(ckpt.dir, 'optimizer.h5'))
        )
        # Replay one scheduler step per completed test interval.
        for _ in range(len(ckpt.log)*args.test_every):
            self.scheduler.step()
def __init__(self, args, model, loss, loader, ckpt):
    """Initialize trainer state for activity classification: loaders,
    class-index maps, model, loss, optimizer, and LR scheduler.

    Resuming (``args.load`` non-empty) restores the optimizer state and
    fast-forwards the scheduler to the saved epoch.
    """
    self.args = args
    self.ckpt = ckpt
    self.model = model
    self.loss = loss
    self.lr = 0.

    # Loaders and datasets from the loader object.
    self.train_loader = loader.train_loader
    self.test_loader = loader.test_loader
    self.trainset = loader.trainset
    self.testset = loader.testset
    self.evaluateset = loader.evaluateset
    self.evaluate_loader = loader.evaluate_loader

    # Forward map (activity name -> index) and its inverse.
    self.class_index = self.evaluateset.activity_index
    self.index_class = {idx: name for name, idx in self.class_index.items()}

    self.optimizer = utility.make_optimizer(args, self.model)
    self.scheduler = utility.make_scheduler(args, self.optimizer)
    self.device = torch.device('cpu' if args.cpu else 'cuda')

    if args.load != '':
        saved_state = torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
        self.optimizer.load_state_dict(saved_state)
        # One scheduler step per completed test interval.
        for _ in range(len(ckpt.log) * args.test_every):
            self.scheduler.step()
def __init__(self, args, model, loss, loader, ckpt):
    """Initialize trainer state: loaders, model, loss, optimizer,
    scheduler, optional apex mixed precision, and a per-class feature
    center buffer.

    Resuming (``args.load`` non-empty) restores the optimizer state and
    the feature centers, then fast-forwards the scheduler.
    """
    self.args = args
    self.ckpt = ckpt
    self.model = model
    self.loss = loss
    self.lr = 0.

    # Loaders and datasets from the loader object.
    self.train_loader = loader.train_loader
    self.test_loader = loader.test_loader
    self.query_loader = loader.query_loader
    self.testset = loader.testset
    self.queryset = loader.queryset

    self.optimizer = utility.make_optimizer(args, self.model)
    self.scheduler = utility.make_scheduler(args, self.optimizer)

    # Mixed precision via NVIDIA apex when requested by the flag.
    self.apex = args.apex
    if self.apex == "yes":
        self.model, self.optimizer = amp.initialize(
            self.model, self.optimizer, opt_level="O1")

    self.device = torch.device('cpu' if args.cpu else 'cuda')
    # Zero-initialized buffer: one row of num_attentions * num_features
    # values per class; updated elsewhere with momentum `beta`.
    self.feature_center = torch.zeros(
        args.num_classes,
        args.num_attentions * args.num_features).to(self.device)
    self.beta = args.L2_beta

    if args.load != '':
        self.optimizer.load_state_dict(
            torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
        )
        self.feature_center = torch.load(
            os.path.join(ckpt.dir, 'feature_center.pt')).to(self.device)
        # One scheduler step per completed test interval.
        for _ in range(len(ckpt.log)*args.test_every):
            self.scheduler.step()
def __init__(self, args, loader, device, neg_loader=None):
    """Initialize a knowledge-distillation SR trainer.

    Sets up output/log directories, training hyper-parameters, the model
    (via ``self.build_model``), the optimizer, and the loss terms
    (contrastive, L1, adversarial, perceptual) with their weights.

    Args:
        args: parsed command-line/config namespace.
        loader: data loader bundle used during training/evaluation.
        device: torch device to run on.
        neg_loader: optional loader of negative samples (stored by callers;
            unused here).
    """
    self.model_str = args.model.lower()
    self.pic_path = f'./output/{self.model_str}/{args.model_filename}/'
    # FIX: the original did `if not exists: self.makedirs = os.makedirs(...)`,
    # which races with concurrent creation and stored a useless None.
    # exist_ok=True is atomic and idiomatic.
    os.makedirs(self.pic_path, exist_ok=True)

    # Teacher / checkpoint configuration.
    self.teacher_model = args.teacher_model
    self.checkpoint_dir = args.pre_train
    self.model_filename = args.model_filename
    self.model_filepath = f'{self.model_filename}.pth'
    self.writer = SummaryWriter(f'log/{self.model_filename}')

    # Training-loop bookkeeping and hyper-parameters.
    self.start_epoch = -1
    self.device = device
    self.epochs = args.epochs
    self.init_lr = args.lr
    self.rgb_range = args.rgb_range
    self.scale = args.scale[0]
    self.stu_width_mult = args.stu_width_mult
    self.batch_size = args.batch_size
    self.neg_num = args.neg_num
    self.save_results = args.save_results
    self.self_ensemble = args.self_ensemble
    self.print_every = args.print_every
    self.best_psnr = 0
    self.best_psnr_epoch = -1
    self.loader = loader

    # Per-channel normalization statistics used by the pipeline.
    self.mean = [0.404, 0.436, 0.446]
    self.std = [0.288, 0.263, 0.275]

    self.build_model(args)
    self.upsampler = nn.Upsample(scale_factor=self.scale, mode='bicubic')
    self.optimizer = utility.make_optimizer(args, self.model)

    # Loss weights for each distillation term.
    self.t_lambda = args.t_lambda
    self.contra_lambda = args.contra_lambda
    self.ad_lambda = args.ad_lambda
    self.percep_lambda = args.percep_lambda
    self.t_detach = args.contrast_t_detach

    # Loss functions.
    self.contra_loss = ContrastLoss(args.vgg_weight, args.d_func, self.t_detach)
    self.l1_loss = nn.L1Loss()
    self.ad_loss = Adversarial(args, 'GAN')
    self.percep_loss = PerceptualLoss()
    self.t_l_remove = args.t_l_remove
def __init__(self, args, model, loss, loader, ckpt):
    """Initialize trainer state: loaders, model, loss, optimizer, and
    LR scheduler; prints the selected compute device.

    Resuming (``args.load`` non-empty) restores the optimizer state and
    fast-forwards the scheduler to the saved epoch.
    """
    self.args = args
    self.ckpt = ckpt
    self.model = model
    self.loss = loss
    self.lr = 0.

    # Loaders and datasets from the loader object.
    self.train_loader = loader.train_loader
    self.test_loader = loader.test_loader
    self.query_loader = loader.query_loader
    self.testset = loader.testset
    self.queryset = loader.queryset

    self.optimizer = utility.make_optimizer(args, self.model)
    self.scheduler = utility.make_scheduler(args, self.optimizer)
    self.device = torch.device('cpu' if args.cpu else 'cuda')
    print(self.device)

    if args.load != '':
        saved_state = torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
        self.optimizer.load_state_dict(saved_state)
        # One scheduler step per completed test interval.
        for _ in range(len(ckpt.log) * args.test_every):
            self.scheduler.step()
def __init__(self, args, gan_type):
    """Build the adversarial-loss module: a discriminator plus its optimizer.

    For 'WGAN_GP' the optimizer uses the hyper-parameters recommended by
    the WGAN-GP paper; otherwise the global ``args`` settings are reused.
    """
    super(Adversarial, self).__init__()
    self.gan_type = gan_type
    self.gan_k = args.gan_k

    self.dis = discriminator.Discriminator(args)
    if torch.cuda.is_available():
        self.dis.cuda()

    if gan_type == 'WGAN_GP':
        # see https://arxiv.org/pdf/1704.00028.pdf pp.4
        optim_args = SimpleNamespace(
            optimizer='ADAM',
            betas=(0, 0.9),
            epsilon=1e-8,
            lr=1e-5,
            weight_decay=args.weight_decay,
            decay=args.decay,
            gamma=args.gamma,
        )
    else:
        optim_args = args

    self.optimizer = utility.make_optimizer(optim_args, self.dis)
def __init__(self, args, model, loss, loader, ckpt):
    """Initialize trainer state with two resume paths.

    Either ``args.load`` (restore from the checkpoint directory, epoch
    taken from the checkpoint log) or ``args.pre_train`` + ``args.resume``
    (restore from an explicit file, epoch parsed from its name, e.g.
    ``model_100.pt``) may rewind training; the scheduler is then created
    with the recovered ``last_epoch`` so the LR picks up where it left off.
    """
    self.args = args

    # Loaders and datasets from the loader object (gallery used as test set).
    self.train_loader = loader.train_loader
    self.test_loader = loader.test_loader
    self.query_loader = loader.query_loader
    self.testset = loader.galleryset
    self.queryset = loader.queryset

    self.ckpt = ckpt
    self.model = model
    self.loss = loss
    self.lr = 0.
    self.optimizer = utility.make_optimizer(args, self.model)
    self.device = torch.device('cpu' if args.cpu else 'cuda')

    last_epoch = -1
    if torch.cuda.is_available():
        self.ckpt.write_log(torch.cuda.get_device_name(0))

    if args.load != '':
        self.optimizer.load_state_dict(
            torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
        )
        # Epoch of the last logged entry (log column 0 holds the epoch).
        last_epoch = int(ckpt.log[-1, 0]) - 1

    if args.pre_train != '' and args.resume:
        # Epoch is encoded in the checkpoint filename, e.g. .../model_100.pt
        resume_epoch = args.pre_train.split(
            '/')[-1].split('.')[0].split('_')[-1]
        self.optimizer.load_state_dict(
            torch.load(args.pre_train.replace('model', 'optimizer'))
        )
        # FIX: resume_epoch is a string parsed from the filename; the
        # original `resume_epoch - 1` raised TypeError. Convert first.
        last_epoch = int(resume_epoch) - 1

    # Build the scheduler already positioned at the resumed epoch.
    self.scheduler = utility.make_scheduler(
        args, self.optimizer, last_epoch)
    self.ckpt.write_log(
        'Continue from epoch {}'.format(self.scheduler.last_epoch))
    print(ckpt.log)
    print(self.scheduler._last_lr)