Example #1
    def __init__(self, config):
        self.config = config

        # Create dataloader
        source_loader, target_loader, nclasses = datasets.form_visda_datasets(
            config=config, ignore_anomaly=False)
        self.source_loader = source_loader
        self.target_loader = target_loader
        self.nclasses = nclasses

        # Create model
        self.netF, self.nemb = models.form_models(config)
        print(self.netF)
        self.netC = models.Classifier(self.nemb, self.nclasses, nlayers=1)
        utils.weights_init(self.netC)
        print(self.netC)

        if self.config.exp == 'openset':
            self.ano_class_id = self.source_loader.dataset.class_to_idx[
                self.config.anomaly_class]

        self.netF = torch.nn.DataParallel(self.netF).cuda()
        self.netC = torch.nn.DataParallel(self.netC).cuda()

        # Create optimizer
        self.optimizerF = optim.SGD(self.netF.parameters(),
                                    lr=self.config.lr,
                                    momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerC = optim.SGD(self.netC.parameters(),
                                    lr=self.config.lrC,
                                    momentum=config.momentum,
                                    weight_decay=0.0005)
        self.lr_scheduler_F = optim.lr_scheduler.StepLR(self.optimizerF,
                                                        step_size=7000,
                                                        gamma=0.1)
        self.lr_scheduler_C = optim.lr_scheduler.StepLR(self.optimizerC,
                                                        step_size=7000,
                                                        gamma=0.1)

        # restoring checkpoint
        print('Restoring checkpoint ...')
        try:
            ckpt_data = torch.load(
                os.path.join(config.logdir, 'checkpoint.pth'))
            self.start_iter = ckpt_data['iter']
            self.netF.load_state_dict(ckpt_data['F_dict'])
            self.netC.load_state_dict(ckpt_data['C_dict'])
        except (FileNotFoundError, KeyError, RuntimeError):
            # If loading failed, begin from scratch
            print('Checkpoint not found. Training from scratch ...')
            self.start_iter = 0

        # Other vars
        self.criterion = nn.CrossEntropyLoss().cuda()
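
A minimal sketch of how one training iteration might use these objects; the (images, labels) batch format and the train_step name are assumptions, not part of the original:

    def train_step(self, images, labels):
        # Hypothetical single supervised step on source data
        images, labels = images.cuda(), labels.cuda()
        self.optimizerF.zero_grad()
        self.optimizerC.zero_grad()
        logits = self.netC(self.netF(images))
        loss = self.criterion(logits, labels)
        loss.backward()
        self.optimizerF.step()
        self.optimizerC.step()
        # StepLR decays both learning rates by 10x every 7000 steps
        self.lr_scheduler_F.step()
        self.lr_scheduler_C.step()
        return loss.item()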
Example #2
import json
import os

import matplotlib.pyplot as plt
import torch
import torchvision.utils as vutils

# project-local modules (import paths assumed)
import datasets
import utils


def visualize(args):

    num_vis = 100

    # Forming config
    with open(args.cfg_path) as json_file:
        config = json.load(json_file)
    config = utils.ConfigMapper(config)

    # Create dataloader
    source_loader, target_loader, nclasses = datasets.form_visda_datasets(
        config=config, ignore_anomaly=True)

    # Loading model state
    model_state = torch.load(os.path.join(args.results_path,
                                          'model_state.pth'))

    weight_vector = model_state['weight_vector']
    indices_sorted = torch.argsort(weight_vector)
    num = weight_vector.shape[0]
    # guard against num_vis > num, which would make the stride zero
    sampling_interval = max(1, num // num_vis)
    indices_sampled = indices_sorted[0:num:sampling_interval]

    path_vector_all = target_loader.dataset.samples
    paths = []
    for ind in indices_sampled:
        paths.append(path_vector_all[ind][0])
        print(weight_vector[ind])

    # read_images is assumed to be a project helper that loads the image
    # paths into a single (N, C, H, W) tensor for save_image
    imgs = read_images(paths)
    vutils.save_image(imgs,
                      '{}/weight_vis.png'.format(args.results_path),
                      nrow=10)

    weight_vector_np = weight_vector.cpu().numpy()
    plt.figure()
    plt.rcParams.update({'font.size': 19})
    plt.gcf().subplots_adjust(bottom=0.15)
    plt.hist(weight_vector_np, bins=200)
    plt.xlabel('Weight')
    plt.ylabel('Count')
    plt.yticks([0, 1000, 2000, 3000, 4000, 5000, 6000])
    plt.savefig('{}/weight_hist.png'.format(args.results_path), dpi=300)
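
A hedged usage sketch: visualize() only reads args.cfg_path and args.results_path, so a minimal CLI wrapper could look like this (flag names mirror those attributes):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg_path', type=str, required=True)
    parser.add_argument('--results_path', type=str, required=True)
    visualize(parser.parse_args())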
Example #3
def visualize(args):

    num_vis = 100

    # Forming config
    with open(args.cfg_path) as json_file:
        config = json.load(json_file)
    config = utils.ConfigMapper(config)

    # Create dataloader
    source_loader, target_loader, nclasses = datasets.form_visda_datasets(config=config, ignore_anomaly=True)
    # VisDA has 12 classes; hard-coded to override the value returned above
    nclasses = 12
    
    model_state = torch.load(os.path.join(args.results_path, 'model_state.pth'))
    weight_vector = model_state['weight_vector']
    weight_vector = weight_vector.cpu().numpy()

    source_count_list = [0] * nclasses
    target_count_list = [0] * nclasses 
    weight_count_list = [0] * nclasses
    
    source_samples = source_loader.dataset.samples
    for sample in source_samples:
        source_count_list[sample[1]] += 1

    target_samples = target_loader.dataset.samples
    for i, sample in enumerate(target_samples):
        target_count_list[sample[1]] += 1
        weight_count_list[sample[1]] += weight_vector[i]
    
    source_count_list = np.array(source_count_list)
    target_count_list = np.array(target_count_list)
    weight_count_list = np.array(weight_count_list)
    
    source_count_list = source_count_list / np.sum(source_count_list)
    ntarget = np.sum(target_count_list)
    target_count_list = target_count_list / ntarget
    weight_count_list = (weight_count_list / ntarget) * nclasses

    print(source_count_list)
    print(target_count_list)
    print(weight_count_list * target_count_list)
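
If weight_vector holds importance weights that reweight the target distribution toward the source, the last print should roughly match the source class distribution. A small sanity check under that assumption:

    # Hypothetical check: reweighted target frequencies vs. source frequencies
    reweighted = weight_count_list * target_count_list
    print('max deviation from source distribution:',
          np.abs(reweighted - source_count_list).max())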
Example #4
    def __init__(self, config):
        self.config = config
        self.device = 'cuda:0'

        # Create dataloader
        source_loader, target_loader, nclasses = datasets.form_visda_datasets(
            config=config, ignore_anomaly=True)
        self.source_loader = source_loader
        self.target_loader = target_loader
        self.nclasses = nclasses

        # Create model
        self.netF, self.nemb = models.form_models(config)
        print(self.netF)
        self.netC = models.Classifier(self.nemb, self.nclasses, nlayers=1)
        utils.weights_init(self.netC)
        print(self.netC)
        self.netD = models.Classifier(self.nemb,
                                      1,
                                      nlayers=3,
                                      use_spectral=True)
        utils.weights_init(self.netD)
        print(self.netD)
        self.netF = self.netF.to(self.device)
        self.netC = self.netC.to(self.device)
        self.netD = self.netD.to(self.device)

        if self.config.exp == 'openset':
            self.ano_class_id = self.source_loader.dataset.class_to_idx[
                self.config.anomaly_class]

        self.netF = torch.nn.DataParallel(self.netF).cuda()
        self.netC = torch.nn.DataParallel(self.netC).cuda()
        self.netD = torch.nn.DataParallel(self.netD).cuda()

        # Create optimizer
        self.optimizerF = optim.SGD(self.netF.parameters(),
                                    lr=self.config.lr,
                                    momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerC = optim.SGD(self.netC.parameters(),
                                    lr=self.config.lrC,
                                    momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerD = optim.Adam(self.netD.parameters(),
                                     lr=self.config.lrD,
                                     betas=(0.9, 0.999))
        self.lr_scheduler_F = utils.InvLR(self.optimizerF,
                                          gamma=0.0001,
                                          power=0.75)
        self.lr_scheduler_C = utils.InvLR(self.optimizerC,
                                          gamma=0.0001,
                                          power=0.75)

        # creating losses
        self.loss_fn = losses.loss_factory[config.loss]
        self.entropy_criterion = losses.EntropyLoss()
        self.pseudo_frac = self.config.pseudo_frac

        # restoring checkpoint
        print('Restoring checkpoint ...')
        try:
            ckpt_data = torch.load(
                os.path.join(config.logdir, 'checkpoint.pth'))
            self.start_iter = ckpt_data['iter']
            self.netF.load_state_dict(ckpt_data['F_dict'])
            self.netC.load_state_dict(ckpt_data['C_dict'])
            self.netD.load_state_dict(ckpt_data['D_dict'])
        except (FileNotFoundError, KeyError, RuntimeError):
            # If loading failed, begin from scratch
            print('Checkpoint not found. Training from scratch ...')
            self.start_iter = 0
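
utils.InvLR is not defined in this excerpt. A common inverse-decay schedule with this signature (widely used in domain adaptation training) multiplies the base learning rate by (1 + gamma * t)^(-power); a stand-in built on torch's LambdaLR, assuming that behavior:

import torch.optim as optim

def inv_lr(optimizer, gamma=0.0001, power=0.75):
    # Hypothetical stand-in for utils.InvLR:
    # lr(t) = base_lr * (1 + gamma * t) ** -power
    return optim.lr_scheduler.LambdaLR(
        optimizer, lr_lambda=lambda t: (1.0 + gamma * t) ** (-power))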
Example #5
    def __init__(self, config):
        self.config = config
        self.device = 'cuda:0'

        # Create dataloader
        source_loader, target_loader, nclasses = datasets.form_visda_datasets(config=config, ignore_anomaly=True)
        self.source_loader = source_loader
        self.target_loader = target_loader
        self.nclasses = nclasses

        # Create model
        self.netF, self.nemb = models.form_models(config)
        print(self.netF)
        self.netC = models.Classifier(self.nemb, self.nclasses, nlayers=1)
        utils.weights_init(self.netC)
        print(self.netC)
        self.netD = models.Classifier(self.nemb, 1, nlayers=3, use_spectral=True)
        utils.weights_init(self.netD)
        print(self.netD)

        self.netF = self.netF.to(self.device)
        self.netC = self.netC.to(self.device)
        self.netD = self.netD.to(self.device)

        self.netF = torch.nn.DataParallel(self.netF).cuda()
        self.netC = torch.nn.DataParallel(self.netC).cuda()
        self.netD = torch.nn.DataParallel(self.netD).cuda()

        # Create optimizer
        self.optimizerF = optim.SGD(self.netF.parameters(), lr=self.config.lr, momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerC = optim.SGD(self.netC.parameters(), lr=self.config.lrC, momentum=config.momentum,
                                    weight_decay=0.0005)
        self.optimizerD = optim.Adam(self.netD.parameters(), lr=self.config.lrD, betas=(0.9, 0.999))

        self.lr_scheduler_F = utils.InvLR(self.optimizerF, gamma=0.0001, power=0.75)
        self.lr_scheduler_C = utils.InvLR(self.optimizerC, gamma=0.0001, power=0.75)

        # creating losses
        self.loss_fn = losses.loss_factory[config.loss]

        if self.config.weight_update_type == 'discrete':
            self.num_datapoints = len(self.target_loader.dataset)
            self.weight_vector = torch.ones(self.num_datapoints, device=self.device)
        else:
            self.netW = torch_models.resnet18(pretrained=True)
            self.netW.fc = nn.Linear(512, 1)
            self.netW = self.netW.to(self.device)
            self.netW = torch.nn.DataParallel(self.netW).cuda()
            self.optimizerW = optim.Adam(self.netW.parameters(), lr=self.config.lrD, betas=(0.9, 0.999))
            print(self.netW)

        self.weight_update_type = self.config.weight_update_type
        assert self.weight_update_type in ['cont', 'discrete']
        self.weight_thresh_list = [0, 0, 0]
        self.eps = 0.0001

        self.best_acc = 0
        self.entropy_criterion = losses.EntropyLoss()

        # restoring checkpoint
        print('Restoring checkpoint ...')
        try:
            ckpt_path = os.path.join(config.logdir, 'model_state.pth')
            self.restore_state(ckpt_path)
        except (FileNotFoundError, KeyError, RuntimeError):
            # If loading failed, begin from scratch
            print('Checkpoint not found. Training from scratch ...')
            self.itr = 0
            self.epoch = 0
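
In the 'discrete' branch, self.weight_vector stores one scalar per target example. A plausible use (an assumption: the target loader would need to yield dataset indices alongside each batch) is to reweight an unreduced per-sample loss:

    def weighted_target_loss(self, per_sample_loss, indices):
        # Hypothetical per-sample reweighting in 'discrete' mode;
        # per_sample_loss has shape (B,), indices are dataset indices
        w = self.weight_vector[indices]
        return (w * per_sample_loss).sum() / (w.sum() + self.eps)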
Example #6
    def __init__(self, args):
        self.args = args

        # Create dataloader
        source_train_loader, source_val_loader, target_loader, nclasses = datasets.form_visda_datasets(
            config=args)
        self.source_train_loader = source_train_loader
        self.source_val_loader = source_val_loader
        self.target_loader = target_loader
        self.nclasses = nclasses

        # Create model
        if args.model == 'resnet18':
            self.netF = models.resnet18(pretrained=True)
            self.nemb = 512
        elif args.model == 'resnet34':
            self.netF = models.resnet34(pretrained=True)
            self.nemb = 512
        elif args.model == 'resnet50':
            self.netF = models.resnet50(pretrained=True)
            self.nemb = 2048
        elif args.model == 'resnet101':
            self.netF = models.resnet101(pretrained=True)
            self.nemb = 2048
        elif args.model == 'resnet152':
            self.netF = models.resnet152(pretrained=True)
            self.nemb = 2048
        else:
            raise ValueError('Model cannot be recognized.')

        print(self.netF)
        self.netC = models.Classifier(self.nemb, self.nclasses, nlayers=1)
        utils.weights_init(self.netC)
        print(self.netC)

        self.netF = torch.nn.DataParallel(self.netF).cuda()
        self.netC = torch.nn.DataParallel(self.netC).cuda()

        # Create optimizer
        self.optimizerF = optim.SGD(self.netF.parameters(),
                                    lr=self.args.lr,
                                    momentum=args.momentum,
                                    weight_decay=0.0005)
        self.optimizerC = optim.SGD(self.netC.parameters(),
                                    lr=self.args.lrC,
                                    momentum=args.momentum,
                                    weight_decay=0.0005)
        self.lr_scheduler_F = optim.lr_scheduler.StepLR(self.optimizerF,
                                                        step_size=7000,
                                                        gamma=0.1)
        self.lr_scheduler_C = optim.lr_scheduler.StepLR(self.optimizerC,
                                                        step_size=7000,
                                                        gamma=0.1)

        # restoring checkpoint
        print('Restoring checkpoint ...')
        try:
            ckpt_data = torch.load(
                os.path.join(args.save_path, 'checkpoint.pth'))
            self.start_iter = ckpt_data['iter']
            self.netF.load_state_dict(ckpt_data['F_dict'])
            self.netC.load_state_dict(ckpt_data['C_dict'])
        except (FileNotFoundError, KeyError, RuntimeError):
            # If loading failed, begin from scratch
            print('Checkpoint not found. Training from scratch ...')
            self.start_iter = 0

        # Other vars
        self.criterion = nn.CrossEntropyLoss().cuda()
        self.vat_pert_gen = VATPerturbationGenerator(xi=10.0, eps=1.0, ip=1)
        self.entropy_criterion = EntropyLoss()
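
EntropyLoss is imported but not defined in this excerpt. A common implementation (an assumption about this codebase) returns the mean Shannon entropy of the softmax predictions, which is minimized to sharpen predictions on unlabeled target data:

import torch.nn as nn
import torch.nn.functional as F

class EntropyLoss(nn.Module):
    # Hypothetical implementation: mean entropy of the softmax outputs
    def forward(self, logits):
        p = F.softmax(logits, dim=1)
        log_p = F.log_softmax(logits, dim=1)
        return -(p * log_p).sum(dim=1).mean()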