def __init__(self, unlabeled_datasets=None, models=None,
             undersample_before_eval=False, weights=None, kernel_type=LINEAR):
    """Initialize a weighted SVM-based active learner.

    :param unlabeled_datasets: list of unlabeled datasets, one per feature
        space; None is treated as an empty list.
    :param models: optional pre-built models, forwarded to BaseLearner.
    :param undersample_before_eval: forwarded to BaseLearner; whether to
        undersample before evaluation.
    :param weights: per-class SVM weights passed to svm_parameter; defaults
        to [1, 100] when None.
    :param kernel_type: libsvm kernel type constant (default LINEAR).
    """
    BaseLearner.__init__(self, unlabeled_datasets=unlabeled_datasets,
                         models=models,
                         undersample_before_eval=undersample_before_eval)
    self.unlabeled_datasets = unlabeled_datasets or []
    # BUG FIX: the old signature used weights=[1, 100], a mutable default
    # shared across every call; build a fresh list per instance instead.
    if weights is None:
        weights = [1, 100]
    # params correspond to each of the respective models (one if we're in a
    # single feature space). These specify things like what kind of kernel to
    # use; here we just use the default, but *you'll probably want to
    # overwrite this* in your subclass. See the libsvm doc for more
    # information (in particular, svm_test.py is helpful).
    # Parenthesized print works identically under Python 2 and 3.
    print("%s: using kernel type: %s" % (self.name, kernel_type))
    self.weights = weights
    self.kernel_type = kernel_type
    self.params = [svm_parameter(weight=self.weights,
                                 kernel_type=self.kernel_type)
                   for d in self.unlabeled_datasets]
    # presumably a cache for divergence/diversity computations — TODO confirm
    self.div_hash = {}
def __init__(self, dataset_, model=None, rebuild_model_at_each_iter=True,
             name=""):
    """Set up a random-sampling active learner.

    :param dataset_: the dataset to draw samples from.
    :param model: optional model, forwarded to BaseLearner.
    :param rebuild_model_at_each_iter: whether the model is retrained after
        every acquisition step.
    :param name: suffix appended to the "Random" learner name.
    """
    learner_name = "Random" + name
    BaseLearner.__init__(self, dataset_, model, rebuild_model_at_each_iter,
                         learner_name)
    self.description = "RandomSampling selects new samples at random at each step"
def __init__(self, unlabeled_datasets=None, models=None):
    """Initialize an SVM-based learner with default svm parameters.

    :param unlabeled_datasets: list of unlabeled datasets, one per feature
        space; None is treated as an empty list.
    :param models: accepted for interface compatibility; not used here.
    """
    # BUG FIX: the old signature used unlabeled_datasets=[], a mutable
    # default shared across every call (and handed to BaseLearner); use a
    # None sentinel and a fresh list per instance instead.
    if unlabeled_datasets is None:
        unlabeled_datasets = []
    BaseLearner.__init__(self, unlabeled_datasets=unlabeled_datasets)
    # params correspond to each of the respective models (one if we're in a
    # single feature space). These specify things like what kind of kernel to
    # use; here we just use the default, but *you'll probably want to
    # overwrite this* in your subclass. See the libsvm doc for more
    # information (in particular, svm_test.py is helpful).
    self.params = [svm_parameter() for d in unlabeled_datasets]
    # presumably a cache for divergence/diversity computations — TODO confirm
    self.div_hash = {}
def __init__(self, dataset_, model=None, rebuild_model_at_each_iter=True,
             name=""):
    """Set up a maximum-entropy active learner.

    :param dataset_: the dataset to draw samples from.
    :param model: optional model, forwarded to BaseLearner.
    :param rebuild_model_at_each_iter: whether the model is retrained after
        every acquisition step.
    :param name: suffix appended to the "MaxEntropy" learner name.
    """
    learner_name = "MaxEntropy" + name
    BaseLearner.__init__(self, dataset_, model, rebuild_model_at_each_iter,
                         learner_name)
    self.description = ("MaxEntropySampling chooses those samples from the pool, "
                        "where the model entropy of p(y|x) is the highest")
def __init__(self, unlabeled_datasets=None, models=None,
             undersample_before_eval=False):
    """Initialize an SVM-based learner with default svm parameters.

    :param unlabeled_datasets: list of unlabeled datasets, one per feature
        space; None is treated as an empty list.
    :param models: accepted for interface compatibility; not used here.
    :param undersample_before_eval: forwarded to BaseLearner; whether to
        undersample before evaluation.
    """
    # BUG FIX: the old signature used unlabeled_datasets=[], a mutable
    # default shared across every call (and handed to BaseLearner); use a
    # None sentinel and a fresh list per instance instead.
    if unlabeled_datasets is None:
        unlabeled_datasets = []
    BaseLearner.__init__(self, unlabeled_datasets=unlabeled_datasets,
                         undersample_before_eval=undersample_before_eval)
    # params correspond to each of the respective models (one if we're in a
    # single feature space). These specify things like what kind of kernel to
    # use; here we just use the default, but *you'll probably want to
    # overwrite this* in your subclass. See the libsvm doc for more
    # information (in particular, svm_test.py is helpful).
    self.params = [svm_parameter() for d in unlabeled_datasets]
    # presumably a cache for divergence/diversity computations — TODO confirm
    self.div_hash = {}
def __init__(self, dataset_, model=None, rebuild_model_at_each_iter=True,
             name=""):
    """Set up a least-confident active learner (binary classification only).

    :param dataset_: the dataset to draw samples from.
    :param model: optional model, forwarded to BaseLearner.
    :param rebuild_model_at_each_iter: whether the model is retrained after
        every acquisition step.
    :param name: suffix appended to the "LeastConfident" learner name.
    :raises ValueError: if the classification dataset has more than 2 classes.
    """
    BaseLearner.__init__(self, dataset_, model, rebuild_model_at_each_iter,
                         "LeastConfident" + name)
    # Least-confident sampling is only meaningful for binary classification.
    if self.dataset.type == "classification" and len(self.dataset.classes) > 2:
        # IMPROVED: raise ValueError instead of bare Exception for an
        # invalid-argument condition; backward-compatible since ValueError
        # is a subclass of Exception.
        raise ValueError(
            'Least confident sampling is only defined for 2-class problems, this one has {} classes'
            .format(len(self.dataset.classes)))
    self.description = "LeastConfidentSampling chooses those samples from the pool, " \
                       "where the model is least confident"
def __init__(self, unlabeled_datasets=None, models=None,
             undersample_before_eval=False, weights=None, kernel_type=LINEAR):
    """Initialize a weighted SVM-based active learner.

    :param unlabeled_datasets: list of unlabeled datasets, one per feature
        space; None is treated as an empty list.
    :param models: optional pre-built models, forwarded to BaseLearner.
    :param undersample_before_eval: forwarded to BaseLearner; whether to
        undersample before evaluation.
    :param weights: per-class SVM weights passed to svm_parameter; defaults
        to [1, 100] when None.
    :param kernel_type: libsvm kernel type constant (default LINEAR).
    """
    BaseLearner.__init__(self, unlabeled_datasets=unlabeled_datasets,
                         models=models,
                         undersample_before_eval=undersample_before_eval)
    self.unlabeled_datasets = unlabeled_datasets or []
    # BUG FIX: the old signature used weights=[1, 100], a mutable default
    # shared across every call; build a fresh list per instance instead.
    if weights is None:
        weights = [1, 100]
    # params correspond to each of the respective models (one if we're in a
    # single feature space). These specify things like what kind of kernel to
    # use; here we just use the default, but *you'll probably want to
    # overwrite this* in your subclass. See the libsvm doc for more
    # information (in particular, svm_test.py is helpful).
    # Parenthesized print works identically under Python 2 and 3.
    print("%s: using kernel type: %s" % (self.name, kernel_type))
    self.weights = weights
    self.kernel_type = kernel_type
    self.params = [svm_parameter(weight=self.weights,
                                 kernel_type=self.kernel_type)
                   for d in self.unlabeled_datasets]
    # presumably a cache for divergence/diversity computations — TODO confirm
    self.div_hash = {}
def __init__(self, dataset_, model=None, rebuild_model_at_each_iter=True,
             update_parameters_sample=True, min_prob=1e-15, name=""):
    """Set up a Lindley-information active learner.

    :param dataset_: the dataset to draw samples from; its ``type`` picks the
        default prior when no model is supplied.
    :param model: Bayesian model; when None, a BinBetaPrior (classification)
        or GaussianPrior (regression) is constructed.
    :param rebuild_model_at_each_iter: whether the model is retrained after
        every acquisition step.
    :param update_parameters_sample: whether the posterior parameter sample
        is refreshed (stored, used elsewhere).
    :param min_prob: lower clamp for probabilities (stored, used elsewhere).
    :param name: suffix appended to the "LindleyInformation" learner name.
    """
    # Choose a default prior matching the dataset type when none was given.
    # The two dataset types are mutually exclusive, so elif is equivalent to
    # the two separate ifs.
    if model is None:
        if dataset_.type == "classification":
            model = bayesian_models.BinBetaPrior(same_posterior_params=True)
        elif dataset_.type == "regression":
            model = bayesian_models.GaussianPrior()
    BaseLearner.__init__(self, dataset_, model, rebuild_model_at_each_iter,
                         "LindleyInformation" + name)
    self.description = ("LindleyInformation chooses those samples from the pool, "
                        "which provide maximum expected information gain")
    # Posterior sample starts unset; populated later when sampling happens.
    self.model.posterior_parameters_sample = None
    self.update_parameters_sample = update_parameters_sample
    self.min_prob = min_prob
def __init__(self, unlabeled_datasets, models=None,
             undersample_before_eval=False):
    """Thin constructor: forward all arguments to BaseLearner unchanged.

    :param unlabeled_datasets: list of unlabeled datasets.
    :param models: optional pre-built models.
    :param undersample_before_eval: whether to undersample before evaluation.
    """
    BaseLearner.__init__(self,
                         unlabeled_datasets=unlabeled_datasets,
                         models=models,
                         undersample_before_eval=undersample_before_eval)
def __init__(self, unlabeled_datasets, models=None):
    """Thin constructor: forward both arguments to BaseLearner unchanged.

    :param unlabeled_datasets: list of unlabeled datasets.
    :param models: optional pre-built models.
    """
    BaseLearner.__init__(self,
                         unlabeled_datasets=unlabeled_datasets,
                         models=models)
def __init__(self, unlabeled_datasets):
    """Thin constructor: forward the datasets to BaseLearner unchanged.

    :param unlabeled_datasets: list of unlabeled datasets.
    """
    BaseLearner.__init__(self,
                         unlabeled_datasets=unlabeled_datasets)