def test_QueryMarginSampling(self):
    # init the AlExperiment
    al = ClassicActiveLearning(self.__X, self.__y,
                               model=RandomForestClassifier(max_depth=2, random_state=0),
                               stopping_criteria=UnlabeledSetEmpty())
    # create a k-fold experiment
    al.kfold(n_splits=10)
    # set the query strategy
    strategy = QueryMarginSampling()
    al.set_query_strategy(strategy=strategy)
    # set the metric for the experiment
    al.set_performance_metric('accuracy_score')
    # by default, run in multi-thread
    al.execute()
    # get the experiment result
    stateIO = al.get_experiment_result()
    # plot a brief summary of the experiment
    al.plot_learning_curve(title='AlExperiment result %s' % strategy.query_function_name)
def test_QueryInstanceRandom(self):
    # init the AlExperiment
    al = ClassicActiveLearning(self.__X, self.__y,
                               model=RandomForestClassifier(max_depth=2, random_state=0),
                               stopping_criteria=MaxIteration(value=10))
    # create a k-fold experiment
    al.kfold(n_splits=10)
    # set the query strategy
    strategy = QueryInstanceRandom()
    al.set_query_strategy(strategy=strategy)
    # set the metric for the experiment
    al.set_performance_metric('accuracy_score')
    # execute the experiment
    al.execute(verbose=False)
    # get the experiment result
    # stateIO = al.get_experiment_result()
    # plot a brief summary of the experiment
    al.plot_learning_curve(title='AlExperiment result %s' % strategy.query_function_name)