def evaluation_cross_validation_classification(traindat=traindat,
                                               label_traindat=label_traindat):
    from modshogun import CrossValidation, CrossValidationResult
    from modshogun import ContingencyTableEvaluation, ACCURACY
    from modshogun import StratifiedCrossValidationSplitting
    from modshogun import BinaryLabels
    from modshogun import RealFeatures
    from modshogun import LibLinear, L2R_L2LOSS_SVC

    # training data
    features = RealFeatures(traindat)
    labels = BinaryLabels(label_traindat)

    # classifier
    classifier = LibLinear(L2R_L2LOSS_SVC)

    # splitting strategy for 5-fold cross-validation (for classification it's
    # better to use stratified splitting; the standard
    # "CrossValidationSplitting" is also available)
    splitting_strategy = StratifiedCrossValidationSplitting(labels, 5)

    # evaluation method
    evaluation_criterium = ContingencyTableEvaluation(ACCURACY)

    # cross-validation instance
    cross_validation = CrossValidation(classifier, features, labels,
                                       splitting_strategy,
                                       evaluation_criterium)
    cross_validation.set_autolock(False)

    # (optional) repeat x-val 10 times
    cross_validation.set_num_runs(10)

    # perform cross-validation and return the result
    result = cross_validation.evaluate()

    return result
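
# A hypothetical driver (an assumption, not part of the original listing):
# it fabricates toy data in the shapes RealFeatures/BinaryLabels expect
# (a dim x num_examples float64 matrix and a +/-1 label vector) and runs
# the example above. Sizes and the seed are arbitrary.
def demo_evaluation_cross_validation_classification():
    from numpy import random
    random.seed(1)
    traindat = random.rand(2, 100)                          # 2 dims x 100 examples
    label_traindat = 2.0 * (random.rand(100) > 0.5) - 1.0   # +/-1 labels
    return evaluation_cross_validation_classification(traindat, label_traindat)
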
def evaluation_multiclassovrevaluation_modular(traindat, label_traindat,
                                               testdat, label_testdat):
    from modshogun import MulticlassLabels
    from modshogun import MulticlassOVREvaluation, ROCEvaluation
    from modshogun import MulticlassLibLinear, RealFeatures, ContingencyTableEvaluation, ACCURACY
    from modshogun import Math

    Math.init_random(1)

    ground_truth_labels = MulticlassLabels(label_traindat)
    svm = MulticlassLibLinear(1.0, RealFeatures(traindat),
                              MulticlassLabels(label_traindat))
    svm.train()
    predicted_labels = svm.apply()

    binary_evaluator = ROCEvaluation()
    evaluator = MulticlassOVREvaluation(binary_evaluator)
    mean_roc = evaluator.evaluate(predicted_labels, ground_truth_labels)
    #print(mean_roc)

    binary_evaluator = ContingencyTableEvaluation(ACCURACY)
    evaluator = MulticlassOVREvaluation(binary_evaluator)
    mean_accuracy = evaluator.evaluate(predicted_labels, ground_truth_labels)
    #print(mean_accuracy)

    return mean_roc, mean_accuracy
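
# A hypothetical driver for the OvR example (an assumption, not part of the
# original listing). Note that the body above never uses testdat or
# label_testdat, so the training data is simply passed twice here.
def demo_evaluation_multiclassovrevaluation_modular():
    from numpy import random, floor
    random.seed(1)
    traindat = random.rand(2, 150)
    label_traindat = floor(random.rand(150) * 3.0)   # labels in {0, 1, 2}
    return evaluation_multiclassovrevaluation_modular(
        traindat, label_traindat, traindat, label_traindat)
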
def modelselection_random_search_liblinear_modular(traindat=traindat, label_traindat=label_traindat):
    from modshogun import CrossValidation, CrossValidationResult
    from modshogun import ContingencyTableEvaluation, ACCURACY
    from modshogun import StratifiedCrossValidationSplitting
    from modshogun import RandomSearchModelSelection
    from modshogun import ModelSelectionParameters, R_EXP
    from modshogun import ParameterCombination
    from modshogun import BinaryLabels
    from modshogun import RealFeatures
    from modshogun import LibLinear, L2R_L2LOSS_SVC

    # build parameter tree to select C1 and C2
    param_tree_root = ModelSelectionParameters()
    c1 = ModelSelectionParameters("C1")
    param_tree_root.append_child(c1)
    c1.build_values(-2.0, 2.0, R_EXP)

    c2 = ModelSelectionParameters("C2")
    param_tree_root.append_child(c2)
    c2.build_values(-2.0, 2.0, R_EXP)

    # training data
    features = RealFeatures(traindat)
    labels = BinaryLabels(label_traindat)

    # classifier
    classifier = LibLinear(L2R_L2LOSS_SVC)

    # print all parameters available for model selection
    # Don't worry if yours is not included; simply write to the mailing list
    #classifier.print_modsel_params()

    # splitting strategy for 10-fold cross-validation
    splitting_strategy = StratifiedCrossValidationSplitting(labels, 10)

    # evaluation method
    evaluation_criterium = ContingencyTableEvaluation(ACCURACY)

    # cross-validation instance
    cross_validation = CrossValidation(classifier, features, labels,
                                       splitting_strategy, evaluation_criterium)
    cross_validation.set_autolock(False)

    # model selection instance
    model_selection = RandomSearchModelSelection(cross_validation, param_tree_root, 0.5)

    # perform model selection with selected methods
    #print("performing model selection of")
    #param_tree_root.print_tree()
    best_parameters = model_selection.select_model()

    # print best parameters
    #print("best parameters:")
    #best_parameters.print_tree()

    # apply them, re-run cross-validation, and return the result
    best_parameters.apply_to_machine(classifier)
    result = cross_validation.evaluate()

    return result
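
# A hypothetical driver (an assumption, not part of the original listing;
# the module-level traindat/label_traindat defaults are loaded elsewhere):
def demo_modelselection_random_search_liblinear_modular():
    from numpy import random
    random.seed(1)
    traindat = random.rand(2, 100)
    label_traindat = 2.0 * (random.rand(100) > 0.5) - 1.0
    return modelselection_random_search_liblinear_modular(traindat, label_traindat)
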
def modelselection_grid_search_kernel(num_subsets, num_vectors, dim_vectors):
    from numpy import random
    from modshogun import Math, RealFeatures, BinaryLabels, LibSVM
    from modshogun import StratifiedCrossValidationSplitting
    from modshogun import ContingencyTableEvaluation, ACCURACY
    from modshogun import CrossValidation, CrossValidationResult
    from modshogun import GridSearchModelSelection

    # init seed for reproducibility
    Math.init_random(1)
    random.seed(1)

    # create some (non-sense) data
    matrix = random.rand(dim_vectors, num_vectors)

    # wrap the matrix: num_vectors feature vectors of dimension dim_vectors
    features = RealFeatures()
    features.set_feature_matrix(matrix)

    # create labels, two classes
    labels = BinaryLabels(num_vectors)
    for i in range(num_vectors):
        labels.set_label(i, 1 if i % 2 == 0 else -1)

    # create svm
    classifier = LibSVM()

    # splitting strategy
    splitting_strategy = StratifiedCrossValidationSplitting(
        labels, num_subsets)

    # accuracy evaluation
    evaluation_criterion = ContingencyTableEvaluation(ACCURACY)

    # cross validation class for evaluation in model selection
    cross = CrossValidation(classifier, features, labels, splitting_strategy,
                            evaluation_criterion)
    cross.set_num_runs(1)

    # print all parameters available for model selection
    # Don't worry if yours is not included, simply write to the mailing list
    #classifier.print_modsel_params()

    # model parameter selection; create_param_tree is assumed to be defined
    # outside this listing (a sketch follows after this function)
    param_tree = create_param_tree()
    #param_tree.print_tree()

    grid_search = GridSearchModelSelection(cross, param_tree)

    print_state = False
    best_combination = grid_search.select_model(print_state)
    #print("best parameter(s):")
    #best_combination.print_tree()

    best_combination.apply_to_machine(classifier)

    # larger number of runs to have tighter confidence intervals
    cross.set_num_runs(10)
    cross.set_conf_int_alpha(0.01)
    result = cross.evaluate()
    casted = CrossValidationResult.obtain_from_generic(result)
    #print("result mean:", casted.mean)

    return classifier, result, casted.mean
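
# create_param_tree is referenced above but not included in this listing.
# Below is a minimal sketch of what such a helper might look like, assuming
# the grid covers the SVM's C1/C2 constants and the width of a GaussianKernel
# (the real helper may cover more kernels and parameters):
def create_param_tree():
    from modshogun import ModelSelectionParameters, R_EXP
    from modshogun import GaussianKernel

    root = ModelSelectionParameters()

    # regularization constants of the SVM, on an exponential grid 2^-1..2^1
    c1 = ModelSelectionParameters("C1")
    root.append_child(c1)
    c1.build_values(-1.0, 1.0, R_EXP)

    c2 = ModelSelectionParameters("C2")
    root.append_child(c2)
    c2.build_values(-1.0, 1.0, R_EXP)

    # Gaussian kernel whose width is also selected on an exponential grid
    param_gaussian_kernel = ModelSelectionParameters("kernel", GaussianKernel())
    gaussian_kernel_width = ModelSelectionParameters("width")
    gaussian_kernel_width.build_values(-1.0, 1.0, R_EXP, 1.0, 2.0)
    param_gaussian_kernel.append_child(gaussian_kernel_width)
    root.append_child(param_gaussian_kernel)

    return root

# With the helper in place, the example above could be run e.g. as:
#   classifier, result, mean = modelselection_grid_search_kernel(3, 40, 2)
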
def evaluation_cross_validation_mkl_weight_storage(
        traindat=traindat, label_traindat=label_traindat):
    from modshogun import CrossValidation, CrossValidationResult
    from modshogun import CrossValidationPrintOutput
    from modshogun import CrossValidationMKLStorage
    from modshogun import ContingencyTableEvaluation, ACCURACY
    from modshogun import StratifiedCrossValidationSplitting
    from modshogun import BinaryLabels
    from modshogun import RealFeatures, CombinedFeatures
    from modshogun import GaussianKernel, CombinedKernel
    from modshogun import LibSVM, MKLClassification

    # training data, combined features all on same data
    features = RealFeatures(traindat)
    comb_features = CombinedFeatures()
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    comb_features.append_feature_obj(features)
    labels = BinaryLabels(label_traindat)

    # kernel, different Gaussians combined
    kernel = CombinedKernel()
    kernel.append_kernel(GaussianKernel(10, 0.1))
    kernel.append_kernel(GaussianKernel(10, 1))
    kernel.append_kernel(GaussianKernel(10, 2))

    # create MKL using LibSVM; due to a memory bug, interleaved optimization is not possible
    svm = MKLClassification(LibSVM())
    svm.set_interleaved_optimization_enabled(False)
    svm.set_kernel(kernel)

    # splitting strategy for 5-fold cross-validation (for classification it's
    # better to use stratified splitting; the standard
    # "CrossValidationSplitting" is also available)
    splitting_strategy = StratifiedCrossValidationSplitting(labels, 5)

    # evaluation method
    evaluation_criterium = ContingencyTableEvaluation(ACCURACY)

    # cross-validation instance
    cross_validation = CrossValidation(svm, comb_features, labels,
                                       splitting_strategy,
                                       evaluation_criterium)
    cross_validation.set_autolock(False)

    # append cross-validation output classes
    #cross_validation.add_cross_validation_output(CrossValidationPrintOutput())
    mkl_storage = CrossValidationMKLStorage()
    cross_validation.add_cross_validation_output(mkl_storage)
    cross_validation.set_num_runs(3)

    # perform cross-validation
    result = cross_validation.evaluate()

    # retrieve the mkl weights gathered during cross-validation and return them
    weights = mkl_storage.get_mkl_weights()

    return weights
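
# A hypothetical driver (an assumption, not part of the original listing):
def demo_evaluation_cross_validation_mkl_weight_storage():
    from numpy import random
    random.seed(1)
    traindat = random.rand(2, 60)
    label_traindat = 2.0 * (random.rand(60) > 0.5) - 1.0
    return evaluation_cross_validation_mkl_weight_storage(traindat, label_traindat)
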
def evaluation_contingencytableevaluation_modular(ground_truth, predicted):
    from modshogun import BinaryLabels
    from modshogun import ContingencyTableEvaluation
    from modshogun import AccuracyMeasure, ErrorRateMeasure, BALMeasure
    from modshogun import WRACCMeasure, F1Measure, CrossCorrelationMeasure
    from modshogun import RecallMeasure, PrecisionMeasure, SpecificityMeasure

    ground_truth_labels = BinaryLabels(ground_truth)
    predicted_labels = BinaryLabels(predicted)

    base_evaluator = ContingencyTableEvaluation()
    base_evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = AccuracyMeasure()
    accuracy = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = ErrorRateMeasure()
    errorrate = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = BALMeasure()
    bal = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = WRACCMeasure()
    wracc = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = F1Measure()
    f1 = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = CrossCorrelationMeasure()
    crosscorrelation = evaluator.evaluate(predicted_labels,
                                          ground_truth_labels)

    evaluator = RecallMeasure()
    recall = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = PrecisionMeasure()
    precision = evaluator.evaluate(predicted_labels, ground_truth_labels)

    evaluator = SpecificityMeasure()
    specificity = evaluator.evaluate(predicted_labels, ground_truth_labels)

    return accuracy, errorrate, bal, wracc, f1, crosscorrelation, recall, precision, specificity
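
# A hypothetical driver (an assumption, not part of the original listing):
# two random +/-1 vectors stand in for ground truth and predictions.
def demo_evaluation_contingencytableevaluation_modular():
    from numpy import random
    random.seed(1)
    ground_truth = 2.0 * (random.rand(50) > 0.5) - 1.0
    predicted = 2.0 * (random.rand(50) > 0.5) - 1.0
    return evaluation_contingencytableevaluation_modular(ground_truth, predicted)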