Example 1
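These examples are trainer-class __init__ methods. As written they assume roughly the following imports, where datasets, models, utils, and losses are project-local modules rather than pip packages (the exact module layout is an assumption; torch_models aliases torchvision.models and is used only in the third example):

import os

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as torch_models

import datasets
import losses
import models
import utils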
    def __init__(self, config):
        self.config = config

        # Create dataloader
        source_loader, target_loader, nclasses = datasets.form_visda_datasets(
            config=config, ignore_anomaly=False)
        self.source_loader = source_loader
        self.target_loader = target_loader
        self.nclasses = nclasses

        # Create model
        self.netF, self.nemb = models.form_models(config)
        print(self.netF)
        self.netC = models.Classifier(self.nemb, self.nclasses, nlayers=1)
        utils.weights_init(self.netC)
        print(self.netC)

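        # In the open-set setting, record the dataset index of the designated anomaly class.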
        if self.config.exp == 'openset':
            self.ano_class_id = self.source_loader.dataset.class_to_idx[
                self.config.anomaly_class]

        self.netF = torch.nn.DataParallel(self.netF).cuda()
        self.netC = torch.nn.DataParallel(self.netC).cuda()

        # Create optimizer
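        # netF (backbone) and netC (classifier head) get separate learning rates: config.lr vs. config.lrC.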
        self.optimizerF = optim.SGD(self.netF.parameters(),
                                    lr=self.config.lr,
                                    momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerC = optim.SGD(self.netC.parameters(),
                                    lr=self.config.lrC,
                                    momentum=config.momentum,
                                    weight_decay=0.0005)
        self.lr_scheduler_F = optim.lr_scheduler.StepLR(self.optimizerF,
                                                        step_size=7000,
                                                        gamma=0.1)
        self.lr_scheduler_C = optim.lr_scheduler.StepLR(self.optimizerC,
                                                        step_size=7000,
                                                        gamma=0.1)

        # restoring checkpoint
        print('Restoring checkpoint ...')
        try:
            ckpt_data = torch.load(
                os.path.join(config.logdir, 'checkpoint.pth'))
            self.start_iter = ckpt_data['iter']
            self.netF.load_state_dict(ckpt_data['F_dict'])
            self.netC.load_state_dict(ckpt_data['C_dict'])
        except Exception:
            # If loading failed, begin from scratch
            print('Checkpoint not found. Training from scratch ...')
            self.start_iter = 0

        # Other vars
        self.criterion = nn.CrossEntropyLoss().cuda()
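
The restore block reads the keys 'iter', 'F_dict', and 'C_dict', which implies a save routine shaped roughly like the following sketch (save_checkpoint is a hypothetical helper, not shown in the source):

    def save_checkpoint(self, itr):
        # Hypothetical counterpart to the restore block above: persist the
        # training state under the same keys it expects to read back.
        ckpt_data = {
            'iter': itr,
            'F_dict': self.netF.state_dict(),
            'C_dict': self.netC.state_dict(),
        }
        torch.save(ckpt_data, os.path.join(self.config.logdir, 'checkpoint.pth'))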
Example 2
    def __init__(self, config):
        self.config = config
        self.device = 'cuda:0'

        # Create dataloader
        source_loader, target_loader, nclasses = datasets.form_visda_datasets(
            config=config, ignore_anomaly=True)
        self.source_loader = source_loader
        self.target_loader = target_loader
        self.nclasses = nclasses

        # Create model
        self.netF, self.nemb = models.form_models(config)
        print(self.netF)
        self.netC = models.Classifier(self.nemb, self.nclasses, nlayers=1)
        utils.weights_init(self.netC)
        print(self.netC)
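        # netD: a 3-layer head with a single output logit; use_spectral=True presumably
        # applies spectral normalization to stabilize the adversarial training.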
        self.netD = models.Classifier(self.nemb,
                                      1,
                                      nlayers=3,
                                      use_spectral=True)
        utils.weights_init(self.netD)
        print(self.netD)
        self.netF = self.netF.to(self.device)
        self.netC = self.netC.to(self.device)
        self.netD = self.netD.to(self.device)

        if self.config.exp == 'openset':
            self.ano_class_id = self.source_loader.dataset.class_to_idx[
                self.config.anomaly_class]

        self.netF = torch.nn.DataParallel(self.netF).cuda()
        self.netC = torch.nn.DataParallel(self.netC).cuda()
        self.netD = torch.nn.DataParallel(self.netD).cuda()
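        # DataParallel(...).cuda() moves the parameters onto the GPUs itself,
        # so the earlier .to(self.device) calls are redundant but harmless.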

        # Create optimizer
        self.optimizerF = optim.SGD(self.netF.parameters(),
                                    lr=self.config.lr,
                                    momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerC = optim.SGD(self.netC.parameters(),
                                    lr=self.config.lrC,
                                    momentum=config.momentum,
                                    weight_decay=0.0005)
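        # The discriminator is trained with Adam while F and C use SGD, a split
        # commonly seen in adversarial training setups.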
        self.optimizerD = optim.Adam(self.netD.parameters(),
                                     lr=self.config.lrD,
                                     betas=(0.9, 0.999))
        self.lr_scheduler_F = utils.InvLR(self.optimizerF,
                                          gamma=0.0001,
                                          power=0.75)
        self.lr_scheduler_C = utils.InvLR(self.optimizerC,
                                          gamma=0.0001,
                                          power=0.75)

        # creating losses
        self.loss_fn = losses.loss_factory[config.loss]
        self.entropy_criterion = losses.EntropyLoss()
        self.pseudo_frac = self.config.pseudo_frac

        # restoring checkpoint
        print('Restoring checkpoint ...')
        try:
            ckpt_data = torch.load(
                os.path.join(config.logdir, 'checkpoint.pth'))
            self.start_iter = ckpt_data['iter']
            self.netF.load_state_dict(ckpt_data['F_dict'])
            self.netC.load_state_dict(ckpt_data['C_dict'])
            self.netD.load_state_dict(ckpt_data['D_dict'])
        except Exception:
            # If loading failed, begin from scratch
            print('Checkpoint not found. Training from scratch ...')
            self.start_iter = 0
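
utils.InvLR is project code that is not shown here. Its gamma=0.0001, power=0.75 arguments match the inverse-decay schedule lr_t = lr_0 * (1 + gamma * t)^(-power) popularized by DANN-style domain adaptation, so a minimal sketch under that assumption could wrap LambdaLR:

from torch.optim.lr_scheduler import LambdaLR

class InvLR(LambdaLR):
    # Scales the base lr by (1 + gamma * step)^(-power) on every step().
    def __init__(self, optimizer, gamma=0.0001, power=0.75):
        super().__init__(optimizer,
                         lr_lambda=lambda step: (1.0 + gamma * step) ** (-power))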
Example 3
    def __init__(self, config):
        self.config = config
        self.device = 'cuda:0'

        # Create dataloader
        source_loader, target_loader, nclasses = datasets.form_visda_datasets(config=config, ignore_anomaly=True)
        self.source_loader = source_loader
        self.target_loader = target_loader
        self.nclasses = nclasses

        # Create model
        self.netF, self.nemb = models.form_models(config)
        print(self.netF)
        self.netC = models.Classifier(self.nemb, self.nclasses, nlayers=1)
        utils.weights_init(self.netC)
        print(self.netC)
        self.netD = models.Classifier(self.nemb, 1, nlayers=3, use_spectral=True)
        utils.weights_init(self.netD)
        print(self.netD)

        self.netF = self.netF.to(self.device)
        self.netC = self.netC.to(self.device)
        self.netD = self.netD.to(self.device)

        self.netF = torch.nn.DataParallel(self.netF).cuda()
        self.netC = torch.nn.DataParallel(self.netC).cuda()
        self.netD = torch.nn.DataParallel(self.netD).cuda()

        # Create optimizer
        self.optimizerF = optim.SGD(self.netF.parameters(), lr=self.config.lr, momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerC = optim.SGD(self.netC.parameters(), lr=self.config.lrC, momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerD = optim.Adam(self.netD.parameters(), lr=self.config.lrD, betas=(0.9, 0.999))

        self.lr_scheduler_F = utils.InvLR(self.optimizerF, gamma=0.0001, power=0.75)
        self.lr_scheduler_C = utils.InvLR(self.optimizerC, gamma=0.0001, power=0.75)

        # creating losses
        self.loss_fn = losses.loss_factory[config.loss]

        self.weight_update_type = self.config.weight_update_type
        assert self.weight_update_type in ['cont', 'discrete']

        if self.weight_update_type == 'discrete':
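            # Discrete mode: keep one scalar weight per target datapoint, initialized to 1.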
            self.num_datapoints = len(self.target_loader.dataset)
            self.weight_vector = torch.ones(self.num_datapoints, device=self.device)
        else:
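            # Continuous mode: presumably predicts per-sample weights with a ResNet-18
            # whose final fully connected layer is replaced by a single-output head.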
            self.netW = torch_models.resnet18(pretrained=True)
            self.netW.fc = nn.Linear(512, 1)
            self.netW = self.netW.to(self.device)
            self.netW = torch.nn.DataParallel(self.netW).cuda()
            self.optimizerW = optim.Adam(self.netW.parameters(), lr=self.config.lrD, betas=(0.9, 0.999))
            print(self.netW)

        self.weight_thresh_list = [0, 0, 0]
        self.eps = 0.0001

        self.best_acc = 0
        self.entropy_criterion = losses.EntropyLoss()

        # restoring checkpoint
        print('Restoring checkpoint ...')
        try:
            ckpt_path = os.path.join(config.logdir, 'model_state.pth')
            self.restore_state(ckpt_path)
        except Exception:
            # If loading failed, begin from scratch
            print('Checkpoint not found. Training from scratch ...')
            self.itr = 0
            self.epoch = 0
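
restore_state is not shown in the source; since a failed restore resets self.itr and self.epoch, a sketch consistent with that behavior might look as follows (all key names are assumptions):

    def restore_state(self, ckpt_path):
        # Hypothetical sketch: assumes the state file stores the iteration and
        # epoch counters alongside the network weights.
        state = torch.load(ckpt_path)
        self.itr = state['itr']
        self.epoch = state['epoch']
        self.netF.load_state_dict(state['F_dict'])
        self.netC.load_state_dict(state['C_dict'])
        self.netD.load_state_dict(state['D_dict'])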