Example 1
    def get_base_config(self, dataset):
        print("Preparing training D1 for %s" %
              (dataset.parent_dataset.__class__.__name__))

        all_loader = DataLoader(dataset,
                                batch_size=self.args.batch_size,
                                num_workers=self.args.workers,
                                pin_memory=True)

        # Set up the criterion
        criterion = nn.NLLLoss().to(self.args.device)

        # Set up the model
        model_class = Global.get_ref_classifier(
            dataset.name)[self.default_model]
        self.add_identifier = model_class.__name__

        # Build the ensemble from five independently trained instances of this class.
        from models import get_ref_model_path
        all_models = []
        for mid in range(5):
            model = model_class()
            model = DeepEnsembleWrapper(model)
            model = model.to(self.args.device)
            h_path = get_ref_model_path(self.args,
                                        model_class.__name__,
                                        dataset.name,
                                        suffix_str='DE.%d' % mid)
            best_h_path = path.join(h_path, 'model.best.pth')
            if not path.isfile(best_h_path):
                raise NotImplementedError(
                    "Please use setup_model to pretrain the networks first! Can't find %s"
                    % best_h_path)
            else:
                print(colored('Loading H1 model from %s' % best_h_path, 'red'))
                model.load_state_dict(torch.load(best_h_path))
                model.eval()
            all_models.append(model)
        master_model = DeepEnsembleMasterWrapper(all_models)

        # Set up the config
        config = IterativeTrainerConfig()

        config.name = '%s-CLS' % (self.args.D1)
        config.phases = {
            'all': {
                'dataset': all_loader,
                'backward': False
            },
        }
        config.criterion = criterion
        config.classification = True
        config.cast_float_label = False
        config.stochastic_gradient = True
        config.model = master_model
        config.optim = None
        config.autoencoder_target = False
        config.visualize = False
        config.logger = Logger()
        return config
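
The `DeepEnsembleWrapper` and `DeepEnsembleMasterWrapper` classes are not shown in this example. A minimal sketch of what the master wrapper might do, assuming it simply averages the members' predicted probabilities (only the class name and the list-of-models constructor come from the example; the body is an assumption):

import torch
import torch.nn as nn

class DeepEnsembleMasterWrapper(nn.Module):
    # Hypothetical sketch: the real class in the source codebase may differ.
    def __init__(self, models):
        super().__init__()
        self.models = nn.ModuleList(models)

    def forward(self, x):
        # The members return log-probabilities (the config uses nn.NLLLoss),
        # so average in probability space and return to log space.
        probs = torch.stack([m(x).exp() for m in self.models]).mean(dim=0)
        return probs.log()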
Example 2
def get_ae_config(args, model, dataset, BCE_Loss):
    print("Preparing training D1 for %s"%(dataset.name))

    # 80%, 20% for local train+test
    train_ds, valid_ds = dataset.split_dataset(0.8)

    if dataset.name in Global.mirror_augment:
        print(colored("Mirror augmenting %s"%dataset.name, 'green'))
        new_train_ds = train_ds + MirroredDataset(train_ds)
        train_ds = new_train_ds

    # Initialize the multi-threaded loaders.
    train_loader = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=args.workers, pin_memory=True)
    valid_loader = DataLoader(valid_ds, batch_size=args.batch_size, num_workers=args.workers, pin_memory=True)
    all_loader   = DataLoader(dataset,  batch_size=args.batch_size, num_workers=args.workers, pin_memory=True)

    # Set up the model
    model = model.to(args.device)

    # Set up the criterion
    criterion = None
    if BCE_Loss:
        criterion = nn.BCEWithLogitsLoss().to(args.device)
    else:
        criterion = nn.MSELoss().to(args.device)
        model.default_sigmoid = True

    # Set up the config
    config = IterativeTrainerConfig()

    config.name = 'autoencoder_%s_%s' % (dataset.name, model.preferred_name())

    config.train_loader = train_loader
    config.valid_loader = valid_loader
    config.phases = {
        'train': {'dataset': train_loader, 'backward': True},
        'test':  {'dataset': valid_loader, 'backward': False},
        'all':   {'dataset': all_loader,   'backward': False},
    }
    config.criterion = criterion
    config.classification = False
    config.cast_float_label = False
    config.autoencoder_target = True
    config.stochastic_gradient = True
    config.visualize = not args.no_visualize
    config.sigmoid_viz = BCE_Loss
    config.model = model
    config.logger = Logger()

    config.optim = optim.Adam(model.parameters(), lr=1e-3)
    config.scheduler = optim.lr_scheduler.ReduceLROnPlateau(config.optim, patience=10, threshold=1e-3, min_lr=1e-6, factor=0.1, verbose=True)
    config.max_epoch = 120
    
    if hasattr(model, 'train_config'):
        model_train_config = model.train_config()
        for key, value in model_train_config.items():
            print('Overriding config.%s' % key)
            setattr(config, key, value)

    return config
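
The loop at the end is a per-model override hook: any model that exposes a `train_config()` method can replace attributes of the shared config. A minimal sketch of such a hook on a hypothetical model (the keys are config attributes from this example; the values are illustrative assumptions):

import torch.nn as nn
import torch.optim as optim

class MyAutoencoder(nn.Module):
    # Hypothetical model illustrating the train_config() hook.
    def __init__(self):
        super().__init__()
        self.net = nn.Linear(784, 784)

    def forward(self, x):
        return self.net(x)

    def train_config(self):
        # Keys must match IterativeTrainerConfig attribute names.
        return {
            'optim': optim.Adam(self.parameters(), lr=1e-4),
            'max_epoch': 60,
        }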
Example 3
    def get_base_config(self, dataset):
        print("Preparing training D1 for %s" %
              (dataset.parent_dataset.__class__.__name__))

        all_loader = DataLoader(dataset,
                                batch_size=self.args.batch_size,
                                num_workers=self.args.workers,
                                pin_memory=True)

        # Set up the model
        model = Global.get_ref_pixelcnn(dataset.name)[self.default_model]().to(
            self.args.device)
        self.add_identifier = model.__class__.__name__

        # Load the snapshot
        from models import get_ref_model_path
        h_path = get_ref_model_path(self.args,
                                    model.__class__.__name__,
                                    dataset.name,
                                    suffix_str=model.netid)
        best_h_path = path.join(h_path, 'model.best.pth')
        if not path.isfile(best_h_path):
            raise NotImplementedError(
                "Please use setup_model to pretrain the networks first! Can't find %s"
                % best_h_path)
        else:
            print(colored('Loading H1 model from %s' % best_h_path, 'red'))
            model.load_state_dict(torch.load(best_h_path))
            model.eval()

        # Set up the criterion
        criterion = PCNN_Loss(one_d=(model.input_channels == 1)).to(
            self.args.device)

        # Set up the config
        config = IterativeTrainerConfig()

        config.name = '%s-pcnn' % (self.args.D1)
        config.phases = {
            'all': {
                'dataset': all_loader,
                'backward': False
            },
        }
        config.criterion = criterion
        config.classification = False
        config.cast_float_label = False
        config.autoencoder_target = True
        config.stochastic_gradient = True
        config.model = model
        config.optim = None
        config.visualize = False
        config.logger = Logger()
        return config
Example 4
    def get_base_config(self, dataset):
        print("Preparing training D1 for %s" % (dataset.name))

        # Initialize the multi-threaded loaders.
        all_loader = DataLoader(dataset, batch_size=self.args.batch_size, num_workers=self.args.workers,
                                pin_memory=True)

        # Set up the model
        model = Global.get_ref_ali(dataset.name)[0]().to(self.args.device)

        # Set up the criterion
        criterion = None
        if self.default_model == 0:
            criterion = nn.BCEWithLogitsLoss().to(self.args.device)
        else:
            criterion = nn.MSELoss().to(self.args.device)
            model.default_sigmoid = True

        # Set up the config
        config = IterativeTrainerConfig()

        config.name = '%s-ALIAE1' % (self.args.D1)
        config.phases = {
            'all': {'dataset': all_loader, 'backward': False},
        }
        config.criterion = criterion
        config.classification = False
        config.cast_float_label = False
        config.autoencoder_target = True
        config.stochastic_gradient = True
        config.visualize = not self.args.no_visualize
        config.sigmoid_viz = self.default_model == 0
        config.model = model
        config.optim = None

        h_path = path.join(self.args.experiment_path, '%s' % (self.__class__.__name__),
                           '%d' % (self.default_model),
                           '%s-%s.pth' % (self.args.D1, self.args.D2))
        h_parent = path.dirname(h_path)

        config.logger = Logger(h_parent)

        return config
Example 5
    def get_base_config(self, dataset):
        print("Preparing training D1 for %s" % (dataset.name))

        all_loader = DataLoader(dataset,
                                batch_size=self.args.batch_size,
                                num_workers=self.args.workers,
                                pin_memory=True)

        # Set up the criterion
        criterion = nn.NLLLoss().to(self.args.device)

        # Set up the model
        import global_vars as Global
        model = Global.get_ref_classifier(
            dataset.name)[self.default_model]().to(self.args.device)
        self.add_identifier = model.__class__.__name__
        if hasattr(model, 'preferred_name'):
            self.add_identifier = model.preferred_name()

        # Set up the config
        config = IterativeTrainerConfig()

        config.name = '%s-CLS' % (self.args.D1)
        config.phases = {
            'all': {
                'dataset': all_loader,
                'backward': False
            },
        }
        config.criterion = criterion
        config.classification = True
        config.cast_float_label = False
        config.stochastic_gradient = True
        config.model = model
        config.optim = None
        config.autoencoder_target = False
        config.visualize = False
        config.logger = Logger()
        return config
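
This example prefers `model.preferred_name()` over the class name when building `self.add_identifier`. A model opts in with a one-line method; the class below is hypothetical and shown only to illustrate the hook:

import torch.nn as nn

class MyClassifier(nn.Module):
    # Hypothetical classifier opting into a custom display name.
    def __init__(self):
        super().__init__()
        self.net = nn.Linear(32, 10)

    def forward(self, x):
        return self.net(x)

    def preferred_name(self):
        # Picked up by get_base_config as self.add_identifier.
        return 'MyClassifier.custom'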
Example 6
def get_pcnn_config(args, model, home_path, dataset):
    print("Preparing training D1 for %s" % (dataset.name))

    sample_im, _ = dataset[0]
    obs = sample_im.size()
    obs = [int(d) for d in obs]

    # 80%, 20% for local train+test
    train_ds, valid_ds = dataset.split_dataset(0.8)

    if dataset.name in Global.mirror_augment:
        print(colored("Mirror augmenting %s" % dataset.name, 'green'))
        new_train_ds = train_ds + MirroredDataset(train_ds)
        train_ds = new_train_ds

    # Initialize the multi-threaded loaders.
    train_loader = DataLoader(train_ds,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=True)
    valid_loader = DataLoader(valid_ds,
                              batch_size=args.batch_size,
                              num_workers=args.workers,
                              pin_memory=True)
    all_loader = DataLoader(dataset,
                            batch_size=args.batch_size,
                            num_workers=args.workers,
                            pin_memory=True)

    # Set up the model
    model = model.to(args.device)

    # Set up the criterion
    criterion = pcnn_utils.PCNN_Loss(one_d=(model.input_channels == 1))

    # Set up the config
    config = IterativeTrainerConfig()

    config.name = 'PCNN_%s_%s' % (dataset.name, model.preferred_name())

    config.train_loader = train_loader
    config.valid_loader = valid_loader
    config.phases = {
        'train': {
            'dataset': train_loader,
            'backward': True
        },
        'test': {
            'dataset': valid_loader,
            'backward': False
        },
        'all': {
            'dataset': all_loader,
            'backward': False
        },
    }
    config.criterion = criterion
    config.classification = False
    config.cast_float_label = False
    config.autoencoder_target = True
    config.stochastic_gradient = True
    config.visualize = not args.no_visualize
    config.model = model
    config.logger = Logger(home_path)
    config.sampler = lambda x: sample(x.model, 32, obs)

    config.optim = optim.Adam(model.parameters(), lr=1e-3)
    config.scheduler = optim.lr_scheduler.ReduceLROnPlateau(config.optim,
                                                            patience=10,
                                                            threshold=1e-2,
                                                            min_lr=1e-5,
                                                            factor=0.1,
                                                            verbose=True)
    config.max_epoch = 60

    if hasattr(model, 'train_config'):
        model_train_config = model.train_config()
        for key, value in model_train_config.items():
            print('Overriding config.%s' % key)
            setattr(config, key, value)

    return config
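
None of the examples show the trainer that consumes these configs. A minimal sketch of an epoch loop under the conventions above: step `config.optim` in the 'train' phase, then feed the accumulated validation loss to the ReduceLROnPlateau scheduler (this loop is an assumption, not part of the source codebase):

import torch

def run_training(config):
    # Hypothetical consumer for an IterativeTrainerConfig (sketch only).
    model, criterion = config.model, config.criterion
    for epoch in range(config.max_epoch):
        model.train()
        for x, y in config.phases['train']['dataset']:
            config.optim.zero_grad()
            target = x if config.autoencoder_target else y
            loss = criterion(model(x), target)
            loss.backward()
            config.optim.step()

        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for x, y in config.phases['test']['dataset']:
                target = x if config.autoencoder_target else y
                val_loss += criterion(model(x), target).item()
        # ReduceLROnPlateau expects the monitored metric.
        config.scheduler.step(val_loss)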