Example #1
    def __init__(self, args, model, loss, loader, ckpt):
        self.args = args
        self.train_loader = loader.train_loader
        self.train_batch_num = len(loader.trainset) // (args.batchid * args.batchimage)
        self.test_loader = loader.test_loader
        self.query_loader = loader.query_loader
        self.testset = loader.testset
        self.queryset = loader.queryset

        self.ckpt = ckpt
        self.model = model
        self.loss = loss
        self.lr = 0.
        self.optimizer = utility.make_optimizer(args, self.model)
        self.scheduler = utility.make_scheduler(args, self.optimizer)

        if args.nGPU > 1:
            # tf.distribute expects a tf.data.Dataset; the PyTorch-style
            # train_loader has to be exposed as one before it can be distributed.
            self.mirrored_strategy = tf.distribute.MirroredStrategy()
            self.distributed_train_loader = self.mirrored_strategy.experimental_distribute_dataset(self.train_loader)

        if args.load != '':
            # load_state_dict expects the state dict saved with torch.save, so the
            # checkpoint is read with torch.load rather than keras.models.load_model
            self.optimizer.load_state_dict(
                torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
            )
            for _ in range(len(ckpt.log)*args.test_every): self.scheduler.step()
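
Example #1 hands a PyTorch DataLoader straight to tf.distribute, which only accepts a tf.data.Dataset. Below is a minimal, self-contained sketch (synthetic data, illustrative batch size, placeholder variable names, none taken from the project) of how experimental_distribute_dataset is normally fed.

    import numpy as np
    import tensorflow as tf

    # synthetic features/labels standing in for the real training set
    features = np.random.rand(64, 8).astype('float32')
    labels = np.random.randint(0, 2, size=(64,)).astype('int64')

    strategy = tf.distribute.MirroredStrategy()
    dataset = tf.data.Dataset.from_tensor_slices((features, labels)).batch(16)
    dist_dataset = strategy.experimental_distribute_dataset(dataset)

    for batch in dist_dataset:
        # with several GPUs visible, each batch is a PerReplica pair of tensors
        pass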
Example #2
    def __init__(self, args, model, loss, loader, ckpt):
        self.args = args
        self.train_loader = loader.train_loader
        self.test_loader = loader.test_loader
        self.trainset = loader.trainset
        self.testset = loader.testset
        self.evaluateset = loader.evaluateset
        self.evaluate_loader = loader.evaluate_loader

        # activity_index maps class name -> index; build the inverse lookup as well
        self.class_index = self.evaluateset.activity_index
        self.index_class = dict(zip(self.class_index.values(), self.class_index.keys()))

        self.ckpt = ckpt
        self.model = model
        self.loss = loss
        self.lr = 0.
        self.optimizer = utility.make_optimizer(args, self.model)
        self.scheduler = utility.make_scheduler(args, self.optimizer)
        self.device = torch.device('cpu' if args.cpu else 'cuda')

        if args.load != '':
            self.optimizer.load_state_dict(
                torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
            )
            for _ in range(len(ckpt.log)*args.test_every): self.scheduler.step()
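
Every example here defers optimizer and scheduler construction to utility.make_optimizer and utility.make_scheduler, whose implementations are not shown. The following is only a plausible sketch of that factory pattern; SGD, step decay, and the hyper-parameter names (momentum, weight_decay, decay_epochs, gamma) are assumptions, not the project's actual settings.

    import torch.optim as optim
    import torch.optim.lr_scheduler as lrs

    def make_optimizer(args, model):
        # one common choice; the project's factory may pick Adam/AMSGrad etc.
        return optim.SGD(model.parameters(), lr=args.lr,
                         momentum=getattr(args, 'momentum', 0.9),
                         weight_decay=getattr(args, 'weight_decay', 5e-4))

    def make_scheduler(args, optimizer, last_epoch=-1):
        # step decay at fixed epochs; milestones and gamma are illustrative only
        return lrs.MultiStepLR(optimizer,
                               milestones=getattr(args, 'decay_epochs', [40, 60]),
                               gamma=getattr(args, 'gamma', 0.1),
                               last_epoch=last_epoch)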
Example #3
    def __init__(self, args, model, loss, loader, loader2, ckpt):
        self.args = args
        self.train_loader = loader.train_loader
        self.test_loader = loader.test_loader
        self.query_loader = loader.query_loader
        self.testset = loader.testset
        self.queryset = loader.queryset
        self.train_loader2 = loader2.train_loader
        self.test_loader2 = loader2.test_loader
        self.query_loader2 = loader2.query_loader
        self.testset2 = loader2.testset
        self.queryset2 = loader2.queryset
        self.losses = []
        self.ckpt = ckpt
        self.model = model
        self.loss = loss
        self.lr = 0.
        self.optimizer = utility.make_optimizer(args, self.model)
        self.scheduler = utility.make_scheduler(args, self.optimizer)
        self.device = torch.device('cpu' if args.cpu else 'cuda')

        if args.load != '':
            self.optimizer.load_state_dict(
                torch.load(os.path.join(ckpt.dir, 'optimizer.pt')))
            for _ in range(len(ckpt.log) * args.test_every):
                self.scheduler.step()
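
When resuming, Examples #1 through #4 fast-forward the scheduler by calling step() once per epoch already recorded in the checkpoint log. A tiny self-contained demonstration of that pattern (milestones and epoch count are illustrative, not the project's settings):

    import torch
    from torch.optim.lr_scheduler import MultiStepLR

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = MultiStepLR(optimizer, milestones=[3, 6], gamma=0.1)

    epochs_already_done = 7            # plays the role of len(ckpt.log) * args.test_every
    for _ in range(epochs_already_done):
        scheduler.step()

    # both milestones have been passed, so the LR is back where the interrupted
    # run left it: 0.1 * 0.1 * 0.1 ~= 0.001
    print(optimizer.param_groups[0]['lr'])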
Example #4
 def __init__(self, args, model, loss, loader, ckpt):
     self.args = args
     self.train_loader = loader.train_loader
     self.test_loader = loader.test_loader
     self.query_loader = loader.query_loader
     self.testset = loader.testset
     self.queryset = loader.queryset
     
     self.ckpt = ckpt
     self.model = model
     self.loss = loss
     self.lr = 0.
     self.optimizer = utility.make_optimizer(args, self.model)
     self.scheduler = utility.make_scheduler(args, self.optimizer)
     self.apex = args.apex
     if self.apex == "yes":
         # NVIDIA apex mixed precision: wrap model and optimizer for opt_level O1
         self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level="O1")
         
     self.device = torch.device('cpu' if args.cpu else 'cuda')
     # per-class feature centers (num_classes x num_attentions*num_features), kept on the training device
     self.feature_center = torch.zeros(args.num_classes, args.num_attentions * args.num_features).to(self.device)
     self.beta = args.L2_beta
     if args.load != '':
         self.optimizer.load_state_dict(
             torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
         )
         self.feature_center = torch.load(os.path.join(ckpt.dir, 'feature_center.pt')).to(self.device)
         for _ in range(len(ckpt.log)*args.test_every): self.scheduler.step()
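
Example #4 only shows amp.initialize; in the training step the backward pass then goes through amp.scale_loss so gradients are scaled for mixed precision. A minimal sketch of that pattern, assuming NVIDIA apex is installed and a CUDA device is available (the model, data, and hyper-parameters below are placeholders):

    import torch
    from apex import amp

    model = torch.nn.Linear(8, 2).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

    inputs = torch.randn(4, 8).cuda()
    targets = torch.randint(0, 2, (4,)).cuda()
    loss = torch.nn.functional.cross_entropy(model(inputs), targets)

    optimizer.zero_grad()
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()       # gradients are unscaled again before optimizer.step()
    optimizer.step()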
Example #5
    def __init__(self, args, model, loss, loader, ckpt):
        self.args = args

        # if args.data_train == 'GTA':
        #     transform_train_list = [
        #         # transforms.RandomResizedCrop(size=128, scale=(0.75,1.0), ratio=(0.75,1.3333), interpolation=3), #Image.BICUBIC)
        #         transforms.Resize((384, 128), interpolation=3),
        #         transforms.Pad(10),
        #         transforms.RandomCrop((384, 128)),
        #         transforms.RandomHorizontalFlip(),
        #         transforms.ToTensor(),
        #         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        #     ]
        #     # train_dataset = datasets.ImageFolder(os.path.join(args.datadir, 'pytorch', 'train_all'),
        #     #                                      transforms.Compose(transform_train_list))
        #     train_dataset = datasets.ImageFolder(os.path.join(args.datadir, 'train'),
        #                                          transforms.Compose(transform_train_list))
        #     self.train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchid * args.batchimage, sampler=a_RandomIdentitySampler(
        #         train_dataset, args.batchid * args.batchimage, args.batchimage), num_workers=8, pin_memory=True)  # 8 workers may work faster
        #     print('GTA has {} classes'.format(train_dataset.classes))
        # else:
        self.train_loader = loader.train_loader

        self.test_loader = loader.test_loader
        self.query_loader = loader.query_loader
        self.testset = loader.galleryset
        self.queryset = loader.queryset

        self.ckpt = ckpt
        self.model = model
        self.loss = loss
        self.lr = 0.
        self.optimizer = utility.make_optimizer(args, self.model)
        self.device = torch.device('cpu' if args.cpu else 'cuda')

        last_epoch = -1

        if torch.cuda.is_available():
            self.ckpt.write_log(torch.cuda.get_device_name(0))

        if args.load != '':
            self.optimizer.load_state_dict(
                torch.load(os.path.join(ckpt.dir, 'optimizer.pt'))
            )
            last_epoch = int(ckpt.log[-1, 0]) - 1

            # for _ in range(last_epoch):
            #     self.scheduler.step()

        if args.pre_train != '' and args.resume:
            resume_epoch = args.pre_train.split(
                '/')[-1].split('.')[0].split('_')[-1]
            self.optimizer.load_state_dict(
                torch.load(args.pre_train.replace('model', 'optimizer'))
            )
            # for _ in range(len(ckpt.log) * args.test_every):
            #     self.scheduler.step()
            last_epoch = int(resume_epoch) - 1

        self.scheduler = utility.make_scheduler(
            args, self.optimizer, last_epoch)

        self.ckpt.write_log(
            'Continue from epoch {}'.format(self.scheduler.last_epoch))

        print(ckpt.log)
        print(self.scheduler.get_last_lr())
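
Example #5 rebuilds the scheduler with an explicit last_epoch instead of replaying step() calls. A minimal sketch of that path (illustrative milestones and epoch): PyTorch only accepts last_epoch != -1 when every param group carries 'initial_lr', which a restored optimizer state normally provides; it is set by hand here so the snippet runs standalone.

    import torch
    from torch.optim.lr_scheduler import MultiStepLR

    model = torch.nn.Linear(4, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    for group in optimizer.param_groups:
        group.setdefault('initial_lr', group['lr'])

    last_epoch = 9                      # plays the role of int(ckpt.log[-1, 0]) - 1
    scheduler = MultiStepLR(optimizer, milestones=[20, 40], gamma=0.1,
                            last_epoch=last_epoch)
    print(scheduler.get_last_lr())      # the LR the resumed run continues with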