def testNB(training_data, testing_data):

    train_data = Instances.copy_instances(training_data)
    test_data = Instances.copy_instances(testing_data)

    evaluation = Evaluation(train_data)
    classifier = Classifier(classname="weka.classifiers.bayes.NaiveBayes")
    classifier.build_classifier(
        train_data)  # build classifier on the training data
    evaluation.test_model(classifier,
                          test_data)  # test and evaluate model on the test set
    print("")
    print("")
    print(
        evaluation.summary(
            "--------------Naive Bayes Evaluation--------------"))
    print("Accuracy: " + str(evaluation.percent_correct))
    print("")
    print("Label\tPrecision\t\tRecall\t\t\tF-Measure")
    print("<=50K\t" + str(evaluation.precision(0)) + "\t" +
          str(evaluation.recall(0)) + "\t" + str(evaluation.f_measure(0)))
    print(">50K\t" + str(evaluation.precision(1)) + "\t" +
          str(evaluation.recall(1)) + "\t" + str(evaluation.f_measure(1)))
    print("Mean\t" + str(((evaluation.precision(1)) +
                          (evaluation.precision(0))) / 2) + "\t" +
          str(((evaluation.recall(1)) + (evaluation.recall(0))) / 2) + "\t" +
          str(((evaluation.f_measure(1)) + (evaluation.f_measure(0))) / 2))
def split_data(data, test_size):  # split the data
    # create placeholder for train split
    data_train = Instances.copy_instances(data)
    # remove all instances from the placeholder
    for i in reversed(range(len(data_train))):
        data_train.delete(i)

    # create placeholder for test split
    data_test = Instances.copy_instances(data)
    # remove all instances from the placeholder
    for i in reversed(range(len(data_test))):
        data_test.delete(i)

    # create list of indices
    indices = list(range(len(data)))
    # shuffle indices
    random.shuffle(indices)
    # calculate number of indices in the test split
    num_test = int(round(len(indices) * test_size, 0))

    # get indices for the test split
    test_ids = indices[:num_test]
    # fill test split with instances
    for idx in test_ids:
        data_test.add_instance(data.get_instance(idx))

    # get indices for the train split
    train_ids = indices[num_test:]
    # fill train split with instances
    for idx in train_ids:
        data_train.add_instance(data.get_instance(idx))

    return data_train, data_test
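
# A small usage sketch (not part of the original listing): split a loaded
# dataset 70/30 and evaluate Naive Bayes on the hold-out, using the two
# helpers defined above. `data` is assumed to be an Instances object with
# the class attribute already set.
def demo_split_and_test(data):
    train, test = split_data(data, 0.3)  # 30% of the instances go to the test split
    testNB(train, test)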
def LabeledUnlabeldata(data, unlabeled, tree, y, cal_method=None):
    """Self-training: repeatedly move unlabeled instances whose predicted
    class probability is at least y into the labeled set and retrain."""
    data1 = Instances.copy_instances(data)
    labeling = Instances.copy_instances(unlabeled)
    tree.build_classifier(data1)

    j = i = s = l = 0

    while i < labeling.num_instances:
        ##### probability calculation #####
        # dist = tree.distribution_for_instance(labeling.get_instance(i))
        dist = calculate_probability_distribution(tree, labeling, i,
                                                  cal_method)

        for k, dk in enumerate(dist):
            if dk >= y:
                # a confident prediction was found: scan the remaining
                # unlabeled instances and absorb every confident one
                j = i
                while j < labeling.num_instances:
                    clsLabel = tree.classify_instance(labeling.get_instance(j))

                    ##### probability calculation #####
                    # dist_j = tree.distribution_for_instance(labeling.get_instance(j))
                    dist_j = calculate_probability_distribution(
                        tree, labeling, j, cal_method)

                    for dp in dist_j:
                        if dp >= y:
                            # adopt the predicted label and move the instance
                            # into the labeled set
                            inst = labeling.get_instance(j)
                            inst.set_value(inst.class_index, clsLabel)
                            data1.add_instance(inst)
                            labeling.delete(j)
                            l += 1
                            j -= 1
                            break  # handle each instance at most once

                    j += 1

            if k == (len(dist) - 1) and (l != 0):
                # new pseudo-labels were added: retrain and restart the scan
                tree.build_classifier(data1)
                i = -1
                s += l
                l = 0
        i += 1

    data1.compactify()
    return data1
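
# The self-training routines in this listing call a helper named
# calculate_probability_distribution() that is not shown here. A minimal
# sketch of what it might look like, assuming it simply wraps the
# classifier's distribution_for_instance() and that cal_method (when given)
# is a callable that post-processes the raw distribution:
def calculate_probability_distribution(classifier, dataset, index, cal_method=None):
    inst = dataset.get_instance(index)
    dist = classifier.distribution_for_instance(inst)  # per-class probabilities
    if cal_method is None:
        return dist
    return cal_method(dist)  # hypothetical calibration hook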
# Example #4
    def folds(self, nfolds=10, seed=None):
        """
        Get (training,testing) datasets for cross-validation.

        Arguments:

            nfolds (int, optional): Number of folds. Default value is
              10.
            seed (int, optional): Seed value for shuffling
              dataset. Default value is random int 0 <= x <= 10000.

        Returns:

            list of (Instances,Instances) tuples: Each list element is
              a pair of (training,testing) datasets, respectively.
        """
        seed = seed or randint(0, 10000)
        rnd = WekaRandom(seed)

        fold_size = labmath.ceil(self.instances.num_instances / nfolds)

        # Shuffle the dataset.
        instances = WekaInstances.copy_instances(self.instances)
        instances.randomize(rnd)

        folds = []
        for i in range(nfolds):
            offset = i * fold_size
            testing_end = min(offset + fold_size, instances.num_instances - 1)

            # Calculate dataset indices for testing and training data.
            testing_range = (offset, testing_end - offset)
            left_range = (0, offset)
            right_range = (testing_end, instances.num_instances - testing_end)

            # If there's nothing to test, move on.
            if testing_range[1] < 1: continue

            # Create testing and training folds.
            testing = WekaInstances.copy_instances(instances, *testing_range)
            left = WekaInstances.copy_instances(instances, *left_range)
            right = WekaInstances.copy_instances(instances, *right_range)
            training = WekaInstances.append_instances(left, right)

            # Add fold to collection.
            folds.append((training, testing))

        return folds
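
# A usage sketch (not from the original code): assuming `dataset` is an
# instance of the surrounding wrapper class (the one holding self.instances),
# run a manual cross-validation over the generated folds with J48, using
# Classifier and Evaluation as imported elsewhere in this listing.
def demo_folds_cross_validation(dataset):
    accuracies = []
    for training, testing in dataset.folds(nfolds=10, seed=42):
        cls = Classifier(classname="weka.classifiers.trees.J48")
        cls.build_classifier(training)
        evl = Evaluation(training)
        evl.test_model(cls, testing)
        accuracies.append(evl.percent_correct)
    print("mean accuracy:", sum(accuracies) / len(accuracies))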
def main():

    try:
        jvm.start()

        loader = Loader(classname="weka.core.converters.CSVLoader")
        data = loader.load_file("./data/adult.csv")

        data.class_is_last()  # set class attribute

        # randomize data
        folds = 10  # number of cross-validation folds (assumed; the original referenced an undefined k)
        seed = 1
        rnd = Random(seed)
        rand_data = Instances.copy_instances(data)
        rand_data.randomize(rnd)
        if rand_data.class_attribute.is_nominal:
            rand_data.stratify(folds)

        NaiveBayes(rand_data, folds, seed, data)
        DecisionTree(rand_data, folds, seed, data)
    except Exception as e:
        raise e
    finally:
        jvm.stop()
def create_subsample(data, percent, seed=1):
    """
    Generates a subsample of the dataset.
    :param data: the data to create the subsample from
    :type data: Instances
    :param percent: the percentage (0-100)
    :type percent: float
    :param seed: the seed value to use
    :type seed: int
    """
    if percent <= 0 or percent >= 100:
        return data
    data = Instances.copy_instances(data)
    data.randomize(Random(seed))
    data = Instances.copy_instances(data, 0, int(round(data.num_instances() * percent / 100.0)))
    return data
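
# A usage sketch (not from the original code): draw a reproducible 25%
# subsample before an expensive experiment; `data` is any loaded Instances
# object (hypothetical here).
def demo_subsample(data):
    sample = create_subsample(data, 25, seed=3)
    print(sample.num_instances, "of", data.num_instances, "instances kept")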
def DecisionTree(rnd_data, folds, seed, data):

    data_size = rnd_data.num_instances
    fold_size = math.floor(data_size / folds)

    # cross-validation
    evaluation = Evaluation(rnd_data)
    for i in range(folds):
        this_fold = fold_size
        test_start = i * fold_size
        test_end = (test_start + fold_size)
        if ((data_size - test_end) / fold_size < 1):
            this_fold = data_size - test_start
        test = Instances.copy_instances(rnd_data, test_start,
                                        this_fold)  # generate validation fold
        if i == 0:
            train = Instances.copy_instances(rnd_data, test_end,
                                             data_size - test_end)
        else:
            train_1 = Instances.copy_instances(rnd_data, 0, test_start)
            train_2 = Instances.copy_instances(rnd_data, test_end,
                                               data_size - test_end)
            train = Instances.append_instances(
                train_1, train_2)  # generate training fold

        # build and evaluate classifier
        cls = Classifier(classname="weka.classifiers.trees.J48")
        cls.build_classifier(train)  # build classifier on training set
        evaluation.test_model(cls,
                              test)  # test classifier on validation/test set

    print("")
    print("=== Decision Tree ===")
    print("Classifier: " + cls.to_commandline())
    print("Dataset: " + data.relationname)
    print("Folds: " + str(folds))
    print("Seed: " + str(seed))
    print("")
    print(
        evaluation.summary("=== " + str(folds) + "-fold Cross-Validation ==="))
# Example #8
def LabeledUnlabeldata(data, unlabeled, tree, y, cal_method=None):

    data1 = Instances.copy_instances(data)
    labeling = Instances.copy_instances(unlabeled)
    tree.build_classifier(data1)
    update = False
    it = 0
    labeling_num_instances = labeling.num_instances
    while labeling.num_instances > 3 and it < labeling_num_instances:
        it += 1
        update = False
        removed_index = set()
        print("labeling.num_instances ===>>   ", labeling.num_instances)

        for i, xi in enumerate(labeling):
            clsLabel = tree.classify_instance(xi)
            dist = calculate_probability_distribution(tree, labeling, i, cal_method)
            for dp in dist:
                if dp >= y:
                    # adopt the predicted label and remember the instance for removal
                    update = True
                    xi.set_value(xi.class_index, clsLabel)
                    data1.add_instance(xi)
                    removed_index.add(i)
                    break  # add each instance at most once

        print("labeling ==================>>", labeling.num_instances)
        print("removed_index ==================>>", len(removed_index))
        # delete in ascending order, compensating for the indices shifting left
        removed_index_list = sorted(removed_index)
        for i, ii in enumerate(removed_index_list):
            labeling.delete(ii - i)
        print("labeling ==================>>", labeling.num_instances)

        if update:
            # retrain on the enlarged labeled set before the next pass
            tree.build_classifier(data1)

    data1.compactify()
    return data1
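
# A usage sketch (not from the original code): self-train a J48 tree,
# accepting pseudo-labels whose class probability is at least 0.9.
# `labeled` and `unlabeled` are hypothetical Instances objects that share
# the same header and have the class attribute set.
def demo_self_training(labeled, unlabeled):
    tree = Classifier(classname="weka.classifiers.trees.J48")
    enlarged = LabeledUnlabeldata(labeled, unlabeled, tree, 0.9)
    print("labeled set grew to", enlarged.num_instances, "instances")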
    def training(self):
        # data preparation (imputation of missing values)
        self.imp = Imputation(self.data)

        # feature selection
        self.features = FeatureSelection(self.imp.imputed_data)
        data_selected = self.features.data_selected
        self.selected_features = self.features.selected_features

        # find the missing-data patterns
        self.missing_patterns = MissingPatterns(self.data, self.selected_features).missing_patterns

        # train one classifier per missing pattern
        for mpi in self.missing_patterns:

            # features available under this missing pattern
            cpi = set(self.selected_features) - set(mpi)
            data_temp = Instances.copy_instances(data_selected, from_row=0, num_rows=data_selected.num_instances)
            data_temp.class_is_last()

            # restrict the training data to the available features
            data_temp = self.reduceData(data_temp, cpi, self.data)

            # train the classifier on the imputed data
            classifier = Classifier(classname=self.learn_class, options=self.options)
            classifier.build_classifier(data_temp)

            # estimate the weight of the classifier (its classification accuracy)
            evl = Evaluation(data_temp)
            evl.crossvalidate_model(classifier, data_temp, 15, Random(1))

            # add the trained classifier to the classifier ensemble
            my_classifier = MyClassifier(classifier, cpi, 1 - evl.mean_absolute_error)
            self.classifiers.add(my_classifier)
# Example #10
def main():
    """
    Just runs some example code.
    """

    # load a dataset
    data_file = helper.get_data_dir() + os.sep + "vote.arff"
    helper.print_info("Loading dataset: " + data_file)
    loader = Loader("weka.core.converters.ArffLoader")
    data = loader.load_file(data_file)
    data.class_is_last()

    # classifier
    classifier = Classifier(classname="weka.classifiers.trees.J48")

    # randomize data
    folds = 10
    seed = 1
    rnd = Random(seed)
    rand_data = Instances.copy_instances(data)
    rand_data.randomize(rnd)
    if rand_data.class_attribute.is_nominal:
        rand_data.stratify(folds)

    # perform cross-validation and add predictions
    predicted_data = None
    evaluation = Evaluation(rand_data)
    for i in range(folds):
        train = rand_data.train_cv(folds, i)
        # the above code is used by the StratifiedRemoveFolds filter,
        # the following code is used by the Explorer/Experimenter
        # train = rand_data.train_cv(folds, i, rnd)
        test = rand_data.test_cv(folds, i)

        # build and evaluate classifier
        cls = Classifier.make_copy(classifier)
        cls.build_classifier(train)
        evaluation.test_model(cls, test)

        # add predictions
        addcls = Filter(
            classname="weka.filters.supervised.attribute.AddClassification",
            options=["-classification", "-distribution", "-error"])
        # setting the java object directory avoids issues with correct quoting in option array
        addcls.set_property("classifier", Classifier.make_copy(classifier))
        addcls.inputformat(train)
        addcls.filter(train)  # trains the classifier
        pred = addcls.filter(test)
        if predicted_data is None:
            predicted_data = Instances.template_instances(pred, 0)
        for n in range(pred.num_instances):
            predicted_data.add_instance(pred.get_instance(n))

    print("")
    print("=== Setup ===")
    print("Classifier: " + classifier.to_commandline())
    print("Dataset: " + data.relationname)
    print("Folds: " + str(folds))
    print("Seed: " + str(seed))
    print("")
    print(evaluation.summary("=== " + str(folds) + " -fold Cross-Validation ==="))
    print("")
    print(predicted_data)
def main():
    """
    Just runs some example code.
    """

    # load a dataset
    iris_file = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset: " + iris_file)
    loader = Loader("weka.core.converters.ArffLoader")
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    helper.print_title("Iris dataset")
    print(iris_data)
    helper.print_title("Iris dataset (incrementally output)")
    for i in iris_data:
        print(i)
    helper.print_title("Iris summary")
    print(Instances.summary(iris_data))
    helper.print_title("Iris attributes")
    for a in iris_data.attributes():
        print(a)
    helper.print_title("Instance at #0")
    print(iris_data.get_instance(0))
    print(iris_data.get_instance(0).values)
    print("Attribute stats (first):\n" + str(iris_data.attribute_stats(0)))
    print("total count (first attribute):\n" + str(iris_data.attribute_stats(0).total_count))
    print("numeric stats (first attribute):\n" + str(iris_data.attribute_stats(0).numeric_stats))
    print("nominal counts (last attribute):\n"
          + str(iris_data.attribute_stats(iris_data.num_attributes - 1).nominal_counts))
    helper.print_title("Instance values at #0")
    for v in iris_data.get_instance(0):
        print(v)

    # append datasets
    helper.print_title("append datasets")
    data1 = Instances.copy_instances(iris_data, 0, 2)
    data2 = Instances.copy_instances(iris_data, 2, 2)
    print("Dataset #1:\n" + str(data1))
    print("Dataset #2:\n" + str(data2))
    msg = data1.equal_headers(data2)
    print("#1 == #2 ? " + "yes" if msg is None else msg)
    combined = Instances.append_instances(data1, data2)
    print("Combined:\n" + str(combined))

    # merge datasets
    helper.print_title("merge datasets")
    data1 = Instances.copy_instances(iris_data, 0, 2)
    data1.class_index = -1
    data1.delete_attribute(1)
    data1.delete_first_attribute()
    data2 = Instances.copy_instances(iris_data, 0, 2)
    data2.class_index = -1
    data2.delete_attribute(4)
    data2.delete_attribute(3)
    data2.delete_attribute(2)
    print("Dataset #1:\n" + str(data1))
    print("Dataset #2:\n" + str(data2))
    msg = data1.equal_headers(data2)
    print("#1 == #2 ? " + ("yes" if msg is None else msg))
    combined = Instances.merge_instances(data2, data1)
    print("Combined:\n" + str(combined))

    # load dataset incrementally
    iris_file = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset incrementally: " + iris_file)
    loader = Loader("weka.core.converters.ArffLoader")
    iris_data = loader.load_file(iris_file, incremental=True)
    iris_data.class_is_last()
    helper.print_title("Iris dataset")
    print(iris_data)
    for inst in loader:
        print(inst)

    # create attributes
    helper.print_title("Creating attributes")
    num_att = Attribute.create_numeric("num")
    print("numeric: " + str(num_att))
    date_att = Attribute.create_date("dat", "yyyy-MM-dd")
    print("date: " + str(date_att))
    nom_att = Attribute.create_nominal("nom", ["label1", "label2"])
    print("nominal: " + str(nom_att))

    # create dataset
    helper.print_title("Create dataset")
    dataset = Instances.create_instances("helloworld", [num_att, date_att, nom_att], 0)
    print(str(dataset))

    # create an instance
    helper.print_title("Create and add instance")
    values = [3.1415926, date_att.parse_date("2014-04-10"), 1.0]
    inst = Instance.create_instance(values)
    print("Instance #1:\n" + str(inst))
    dataset.add_instance(inst)
    values = [2.71828, date_att.parse_date("2014-08-09"), Instance.missing_value()]
    inst = Instance.create_instance(values)
    dataset.add_instance(inst)
    print("Instance #2:\n" + str(inst))
    inst.set_value(0, 4.0)
    print("Instance #2 (updated):\n" + str(inst))
    print("Dataset:\n" + str(dataset))
    dataset.delete_with_missing(2)
    print("Dataset (after delete of missing):\n" + str(dataset))
    values = [(1, date_att.parse_date("2014-07-11"))]
    inst = Instance.create_sparse_instance(values, 3, classname="weka.core.SparseInstance")
    print("sparse Instance:\n" + str(inst))
    dataset.add_instance(inst)
    print("dataset with mixed dense/sparse instance objects:\n" + str(dataset))

    # create dataset (lists)
    helper.print_title("Create dataset from lists")
    x = [[randint(1, 10) for _ in range(5)] for _ in range(10)]
    y = [randint(0, 1) for _ in range(10)]
    dataset2 = ds.create_instances_from_lists(x, y, "generated from lists")
    print(dataset2)
    x = [[randint(1, 10) for _ in range(5)] for _ in range(10)]
    dataset2 = ds.create_instances_from_lists(x, name="generated from lists (no y)")
    print(dataset2)

    # create dataset (matrices)
    helper.print_title("Create dataset from matrices")
    x = np.random.randn(10, 5)
    y = np.random.randn(10)
    dataset3 = ds.create_instances_from_matrices(x, y, "generated from matrices")
    print(dataset3)
    x = np.random.randn(10, 5)
    dataset3 = ds.create_instances_from_matrices(x, name="generated from matrices (no y)")
    print(dataset3)

    # create more sparse instances
    diabetes_file = helper.get_data_dir() + os.sep + "diabetes.arff"
    helper.print_info("Loading dataset: " + diabetes_file)
    loader = Loader("weka.core.converters.ArffLoader")
    diabetes_data = loader.load_file(diabetes_file)
    diabetes_data.class_is_last()
    helper.print_title("Create sparse instances using template dataset")
    sparse_data = Instances.template_instances(diabetes_data)
    for i in range(diabetes_data.num_attributes - 1):
        inst = Instance.create_sparse_instance(
            [(i, float(i+1) / 10.0)], sparse_data.num_attributes, classname="weka.core.SparseInstance")
        sparse_data.add_instance(inst)
    print("sparse dataset:\n" + str(sparse_data))

    # simple scatterplot of iris dataset: petalwidth x petallength
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    pld.scatter_plot(
        iris_data, iris_data.attribute_by_name("petalwidth").index,
        iris_data.attribute_by_name("petallength").index,
        percent=50,
        wait=False)

    # line plot of iris dataset (without class attribute)
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    pld.line_plot(iris_data, atts=range(iris_data.num_attributes - 1), percent=50, title="Line plot iris", wait=False)

    # matrix plot of iris dataset
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    pld.matrix_plot(iris_data, percent=50, title="Matrix plot iris", wait=True)
def plot_learning_curve(classifiers, train, test=None, increments=100, metric="percent_correct",
                        title="Learning curve", label_template="[#] @ $", key_loc="lower right",
                        outfile=None, wait=True):
    """
    Plots a learning curve.

    :param classifiers: list of Classifier template objects
    :type classifiers: list of Classifier
    :param train: dataset to use for building the classifier; also used for evaluating it if the test set is None
    :type train: Instances
    :param test: optional dataset to use for testing the built classifiers
    :type test: Instances
    :param increments: the increments (>= 1: number of instances, < 1: fraction of the dataset)
    :type increments: float
    :param metric: the name of the numeric metric to plot (Evaluation.<metric>)
    :type metric: str
    :param title: the title for the plot
    :type title: str
    :param label_template: the template for the label in the plot
                           (#: 1-based index, @: full classname, !: simple classname, $: options)
    :type label_template: str
    :param key_loc: the location string for the key
    :type key_loc: str
    :param outfile: the output file, ignored if None
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """

    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return
    if not train.has_class():
        logger.error("Training set has no class attribute set!")
        return
    if (test is not None) and (train.equal_headers(test) is not None):
        logger.error("Training and test set are not compatible: " + train.equal_headers(test))
        return

    if increments >= 1:
        inc = increments
    else:
        inc = round(train.num_instances * increments)

    steps = []
    cls = []
    evls = {}
    for classifier in classifiers:
        cl = Classifier.make_copy(classifier)
        cls.append(cl)
        evls[cl] = []
    if test is None:
        tst = train
    else:
        tst = test

    for i in range(train.num_instances):
        if (i > 0) and (i % inc == 0):
            steps.append(i+1)
        for cl in cls:
            # train
            if cl.is_updateable:
                if i == 0:
                    tr = Instances.copy_instances(train, 0, 1)
                    cl.build_classifier(tr)
                else:
                    cl.update_classifier(train.get_instance(i))
            else:
                if (i > 0) and (i % inc == 0):
                    tr = Instances.copy_instances(train, 0, i + 1)
                    cl.build_classifier(tr)
            # evaluate
            if (i > 0) and (i % inc == 0):
                evl = Evaluation(tst)
                evl.test_model(cl, tst)
                evls[cl].append(getattr(evl, metric))

    fig, ax = plt.subplots()
    ax.set_xlabel("# of instances")
    ax.set_ylabel(metric)
    ax.set_title(title)
    fig.canvas.set_window_title(title)
    ax.grid(True)
    i = 0
    for cl in cls:
        evl = evls[cl]
        i += 1
        plot_label = label_template.\
            replace("#", str(i)).\
            replace("@", cl.classname).\
            replace("!", cl.classname[cl.classname.rfind(".") + 1:]).\
            replace("$", join_options(cl.config))
        ax.plot(steps, evl, label=plot_label)
    plt.draw()
    plt.legend(loc=key_loc, shadow=True)
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
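
# A usage sketch (not from the original code): compare an updateable Naive
# Bayes with J48 on a hold-out set, evaluating every 100 training instances.
# `train` and `test` are hypothetical, header-compatible Instances objects.
def demo_learning_curve(train, test):
    classifiers = [
        Classifier(classname="weka.classifiers.bayes.NaiveBayesUpdateable"),
        Classifier(classname="weka.classifiers.trees.J48"),
    ]
    plot_learning_curve(classifiers, train, test=test, increments=100,
                        metric="percent_correct", wait=True)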
# Example #13
def plot_learning_curve(classifiers,
                        train,
                        test=None,
                        increments=100,
                        metric="percent_correct",
                        title="Learning curve",
                        label_template="[#] @ $",
                        key_loc="lower right",
                        outfile=None,
                        wait=True):
    """
    Plots a learning curve.

    :param classifiers: list of Classifier template objects
    :type classifiers: list of Classifier
    :param train: dataset to use for building the classifier; also used for evaluating it if the test set is None
    :type train: Instances
    :param test: optional dataset (or list of datasets) to use for testing the built classifiers
    :type test: list or Instances
    :param increments: the increments (>= 1: number of instances, < 1: fraction of the dataset)
    :type increments: float
    :param metric: the name of the numeric metric to plot (Evaluation.<metric>)
    :type metric: str
    :param title: the title for the plot
    :type title: str
    :param label_template: the template for the label in the plot
                           (#: 1-based index of classifier, @: full classname, !: simple classname,
                           $: options, *: 1-based index of test set)
    :type label_template: str
    :param key_loc: the location string for the key
    :type key_loc: str
    :param outfile: the output file, ignored if None
    :type outfile: str
    :param wait: whether to wait for the user to close the plot
    :type wait: bool
    """

    if not plot.matplotlib_available:
        logger.error("Matplotlib is not installed, plotting unavailable!")
        return
    if not train.has_class():
        logger.error("Training set has no class attribute set!")
        return

    if increments >= 1:
        inc = increments
    else:
        inc = round(train.num_instances * increments)

    if test is None:
        tst = [train]
    elif isinstance(test, list):
        tst = test
    elif isinstance(test, Instances):
        tst = [test]
    else:
        logger.error("Expected list or Instances object, instead: " +
                     type(test))
        return
    for t in tst:
        if train.equal_headers(t) is not None:
            logger.error("Training and test set are not compatible: " +
                         train.equal_headers(t))
            return

    steps = []
    cls = []
    evls = {}
    for classifier in classifiers:
        cl = Classifier.make_copy(classifier)
        cls.append(cl)
        evls[cl] = {}
        for t in tst:
            evls[cl][t] = []

    for i in range(train.num_instances):
        if (i > 0) and (i % inc == 0):
            steps.append(i + 1)
        for cl in cls:
            # train
            if cl.is_updateable:
                if i == 0:
                    tr = Instances.copy_instances(train, 0, 1)
                    cl.build_classifier(tr)
                else:
                    cl.update_classifier(train.get_instance(i))
            else:
                if (i > 0) and (i % inc == 0):
                    tr = Instances.copy_instances(train, 0, i + 1)
                    cl.build_classifier(tr)
            # evaluate
            if (i > 0) and (i % inc == 0):
                for t in tst:
                    evl = Evaluation(t)
                    evl.test_model(cl, t)
                    evls[cl][t].append(getattr(evl, metric))

    fig, ax = plt.subplots()
    ax.set_xlabel("# of instances")
    ax.set_ylabel(metric)
    ax.set_title(title)
    fig.canvas.set_window_title(title)
    ax.grid(True)
    i = 0
    for cl in cls:
        evlpertest = evls[cl]
        i += 1
        n = 0
        for t in tst:
            evl = evlpertest[t]
            n += 1
            plot_label = label_template.\
                replace("#", str(i)).\
                replace("*", str(n)).\
                replace("@", cl.classname).\
                replace("!", cl.classname[cl.classname.rfind(".") + 1:]).\
                replace("$", join_options(cl.config))
            ax.plot(steps, evl, label=plot_label)
    plt.draw()
    plt.legend(loc=key_loc, shadow=True)
    if outfile is not None:
        plt.savefig(outfile)
    if wait:
        plt.show()
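
# A usage sketch (not from the original code): this variant also accepts a
# list of test sets, and "*" in the label template expands to the 1-based
# index of the test set. `train`, `test_a` and `test_b` are hypothetical,
# header-compatible Instances objects.
def demo_learning_curve_multi(train, test_a, test_b):
    classifiers = [Classifier(classname="weka.classifiers.trees.J48")]
    plot_learning_curve(classifiers, train, test=[test_a, test_b],
                        increments=0.05, label_template="[#] ! on test set *")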
# Example #14
print("\nLoading dataset: " + fname + "\n")
data = loader.load_file(fname)
data.class_is_last()

# define classifiers
classifiers = ["weka.classifiers.rules.OneR", "weka.classifiers.trees.J48"]

# cross-validate original dataset
for classifier in classifiers:
    cls = Classifier(classname=classifier)
    evl = Evaluation(data)
    evl.crossvalidate_model(cls, data, 10, Random(1))
    print("%s (original): %0.0f%%" % (classifier, evl.percent_correct))

# replace 'outlook' in first 4 'no' instances with 'missing'
modified = Instances.copy_instances(data)
count = 0
for i in range(modified.num_instances):
    if modified.get_instance(i).get_string_value(modified.class_index) == "no":
        count += 1
        modified.get_instance(i).set_missing(0)
        if count == 4:
            break

# cross-validate modified dataset
for classifier in classifiers:
    cls = Classifier(classname=classifier)
    evl = Evaluation(modified)
    evl.crossvalidate_model(cls, modified, 10, Random(1))
    print("%s (modified): %0.0f%%" % (classifier, evl.percent_correct))
def learning_curve(folds, data):
    training_set_size = []
    train_error_nb = []
    train_error_dtree = []
    cv_error_nb = []
    cv_error_dtree = []

    print("")
    print("This may take some time, please wait..")
    for training_size in range(10, 32561, 750):
        print(".")
        training_set_size.append(training_size)
        train_data = Instances.copy_instances(data, 0, training_size)

        data_size = train_data.num_instances
        fold_size = math.floor(data_size / folds)

        # calculating training and cross-validation error
        evaluation_nb_train = Evaluation(train_data)
        evaluation_nb_cv = Evaluation(train_data)
        evaluation_dtree_train = Evaluation(train_data)
        evaluation_dtree_cv = Evaluation(train_data)
        for i in range(folds):
            this_fold = fold_size
            test_start = i * fold_size
            test_end = (test_start + fold_size)
            if ((data_size - test_end) / fold_size < 1):
                this_fold = data_size - test_start
            test = Instances.copy_instances(
                train_data, test_start, this_fold)  # generate validation fold
            if i == 0:
                train = Instances.copy_instances(train_data, test_end,
                                                 data_size - test_end)
            else:
                train_1 = Instances.copy_instances(train_data, 0, test_start)
                train_2 = Instances.copy_instances(train_data, test_end,
                                                   data_size - test_end)
                train = Instances.append_instances(
                    train_1, train_2)  # generate training fold

            # Naive Bayes
            nb = Classifier(classname="weka.classifiers.bayes.NaiveBayes")
            nb.build_classifier(train)
            evaluation_nb_train.test_model(nb, train)
            evaluation_nb_cv.test_model(nb, test)

            # Decision Tree
            dtree = Classifier(classname="weka.classifiers.trees.J48")
            dtree.build_classifier(train)
            evaluation_dtree_train.test_model(dtree, train)
            evaluation_dtree_cv.test_model(dtree, test)

        train_error_nb.append(
            evaluation_nb_train.error_rate)  # training error - NB
        cv_error_nb.append(
            evaluation_nb_cv.error_rate)  # cross-validation error - NB
        train_error_dtree.append(
            evaluation_dtree_train.error_rate)  # training error - DTree
        cv_error_dtree.append(
            evaluation_dtree_cv.error_rate)  # cross-validation error - DTree

    # Plotting of Learning Curve
    x = training_set_size
    y1 = train_error_nb
    z1 = cv_error_nb
    y2 = train_error_dtree
    z2 = cv_error_dtree

    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(13, 8))

    axes[0].plot(x, y1, label='Training Error')
    axes[0].plot(x, z1, label='Cross-Validation Error')
    axes[0].set_xlabel('Training Set Size')
    axes[0].set_ylabel('Error Rate')
    axes[0].set_title('Naive Bayes')
    axes[0].legend()

    axes[1].plot(x, y2, label='Training Error')
    axes[1].plot(x, z2, label='Cross-Validation Error')
    axes[1].set_xlabel('Training Set Size')
    axes[1].set_ylabel('Error Rate')
    axes[1].set_title('Decision Tree')
    axes[1].legend()

    plt.show(block=True)
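
# A usage sketch (not from the original code): plot the 10-fold learning
# curves on the adult dataset, loaded the same way as in main() above
# (assumes a running JVM and "./data/adult.csv").
def demo_adult_learning_curve():
    loader = Loader(classname="weka.core.converters.CSVLoader")
    data = loader.load_file("./data/adult.csv")
    data.class_is_last()
    learning_curve(10, data)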
def main():
    """
    Just runs some example code.
    """

    # load a dataset
    iris_file = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset: " + iris_file)
    loader = Loader("weka.core.converters.ArffLoader")
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    helper.print_title("Iris dataset")
    print(iris_data)
    helper.print_title("Iris dataset (incrementally output)")
    for i in iris_data:
        print(i)
    helper.print_title("Iris summary")
    print(Instances.summary(iris_data))
    helper.print_title("Iris attributes")
    for a in iris_data.attributes():
        print(a)
    helper.print_title("Instance at #0")
    print(iris_data.get_instance(0))
    print(iris_data.get_instance(0).values)
    print("Attribute stats (first):\n" + str(iris_data.attribute_stats(0)))
    print("total count (first attribute):\n" +
          str(iris_data.attribute_stats(0).total_count))
    print("numeric stats (first attribute):\n" +
          str(iris_data.attribute_stats(0).numeric_stats))
    print("nominal counts (last attribute):\n" + str(
        iris_data.attribute_stats(iris_data.num_attributes -
                                  1).nominal_counts))
    helper.print_title("Instance values at #0")
    for v in iris_data.get_instance(0):
        print(v)

    # append datasets
    helper.print_title("append datasets")
    data1 = Instances.copy_instances(iris_data, 0, 2)
    data2 = Instances.copy_instances(iris_data, 2, 2)
    print("Dataset #1:\n" + str(data1))
    print("Dataset #2:\n" + str(data2))
    msg = data1.equal_headers(data2)
    print("#1 == #2 ? " + "yes" if msg is None else msg)
    combined = Instances.append_instances(data1, data2)
    print("Combined:\n" + str(combined))

    # merge datasets
    helper.print_title("merge datasets")
    data1 = Instances.copy_instances(iris_data, 0, 2)
    data1.class_index = -1
    data1.delete_attribute(1)
    data1.delete_first_attribute()
    data2 = Instances.copy_instances(iris_data, 0, 2)
    data2.class_index = -1
    data2.delete_attribute(4)
    data2.delete_attribute(3)
    data2.delete_attribute(2)
    print("Dataset #1:\n" + str(data1))
    print("Dataset #2:\n" + str(data2))
    msg = data1.equal_headers(data2)
    print("#1 == #2 ? " + ("yes" if msg is None else msg))
    combined = Instances.merge_instances(data2, data1)
    print("Combined:\n" + str(combined))

    # load dataset incrementally
    iris_file = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset incrementally: " + iris_file)
    loader = Loader("weka.core.converters.ArffLoader")
    iris_data = loader.load_file(iris_file, incremental=True)
    iris_data.class_is_last()
    helper.print_title("Iris dataset")
    print(iris_data)
    for inst in loader:
        print(inst)

    # create attributes
    helper.print_title("Creating attributes")
    num_att = Attribute.create_numeric("num")
    print("numeric: " + str(num_att))
    date_att = Attribute.create_date("dat", "yyyy-MM-dd")
    print("date: " + str(date_att))
    nom_att = Attribute.create_nominal("nom", ["label1", "label2"])
    print("nominal: " + str(nom_att))

    # create dataset
    helper.print_title("Create dataset")
    dataset = Instances.create_instances("helloworld",
                                         [num_att, date_att, nom_att], 0)
    print(str(dataset))

    # create an instance
    helper.print_title("Create and add instance")
    values = [3.1415926, date_att.parse_date("2014-04-10"), 1.0]
    inst = Instance.create_instance(values)
    print("Instance #1:\n" + str(inst))
    dataset.add_instance(inst)
    values = [
        2.71828,
        date_att.parse_date("2014-08-09"),
        Instance.missing_value()
    ]
    inst = Instance.create_instance(values)
    dataset.add_instance(inst)
    print("Instance #2:\n" + str(inst))
    inst.set_value(0, 4.0)
    print("Instance #2 (updated):\n" + str(inst))
    print("Dataset:\n" + str(dataset))
    dataset.delete_with_missing(2)
    print("Dataset (after delete of missing):\n" + str(dataset))
    values = [(1, date_att.parse_date("2014-07-11"))]
    inst = Instance.create_sparse_instance(
        values, 3, classname="weka.core.SparseInstance")
    print("sparse Instance:\n" + str(inst))
    dataset.add_instance(inst)
    print("dataset with mixed dense/sparse instance objects:\n" + str(dataset))

    # create dataset (lists)
    helper.print_title("Create dataset from lists")
    x = [[randint(1, 10) for _ in range(5)] for _ in range(10)]
    y = [randint(0, 1) for _ in range(10)]
    dataset2 = ds.create_instances_from_lists(x, y, "generated from lists")
    print(dataset2)
    x = [[randint(1, 10) for _ in range(5)] for _ in range(10)]
    dataset2 = ds.create_instances_from_lists(
        x, name="generated from lists (no y)")
    print(dataset2)

    # create dataset (matrices)
    helper.print_title("Create dataset from matrices")
    x = np.random.randn(10, 5)
    y = np.random.randn(10)
    dataset3 = ds.create_instances_from_matrices(x, y,
                                                 "generated from matrices")
    print(dataset3)
    x = np.random.randn(10, 5)
    dataset3 = ds.create_instances_from_matrices(
        x, name="generated from matrices (no y)")
    print(dataset3)

    # create more sparse instances
    diabetes_file = helper.get_data_dir() + os.sep + "diabetes.arff"
    helper.print_info("Loading dataset: " + diabetes_file)
    loader = Loader("weka.core.converters.ArffLoader")
    diabetes_data = loader.load_file(diabetes_file)
    diabetes_data.class_is_last()
    helper.print_title("Create sparse instances using template dataset")
    sparse_data = Instances.template_instances(diabetes_data)
    for i in range(diabetes_data.num_attributes - 1):
        inst = Instance.create_sparse_instance(
            [(i, float(i + 1) / 10.0)],
            sparse_data.num_attributes,
            classname="weka.core.SparseInstance")
        sparse_data.add_instance(inst)
    print("sparse dataset:\n" + str(sparse_data))

    # simple scatterplot of iris dataset: petalwidth x petallength
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    pld.scatter_plot(iris_data,
                     iris_data.attribute_by_name("petalwidth").index,
                     iris_data.attribute_by_name("petallength").index,
                     percent=50,
                     wait=False)

    # line plot of iris dataset (without class attribute)
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    pld.line_plot(iris_data,
                  atts=range(iris_data.num_attributes - 1),
                  percent=50,
                  title="Line plot iris",
                  wait=False)

    # matrix plot of iris dataset
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    pld.matrix_plot(iris_data, percent=50, title="Matrix plot iris", wait=True)
    def perceptron_classifier(cls, features, settings):
        # load the dataset
        loader = Loader("weka.core.converters.ArffLoader")
        instancias = loader.load_file(
            "./src/results/caracteristicas_sounds.arff")
        # flag the last attribute as the class
        instancias.class_is_last()
        # define the parameters
        learning_rate = str(settings['learningRate'])
        training_time = str(settings['trainingTime'])
        momentum = "0.2"
        hidden_layers = "a"
        seed = 2
        cross_validation = 20
        print('Learning Rate', learning_rate)
        print('Training Time', training_time)
        # load the Multilayer Perceptron classifier with the parameters defined above
        classifier = Classifier(
            classname="weka.classifiers.functions.MultilayerPerceptron",
            options=[
                "-L", learning_rate, "-M", momentum, "-N", training_time, "-V",
                "0", "-S",
                str(seed), "-E", "20", "-H", hidden_layers
            ])
        # build the classifier and evaluate it on the dataset
        classifier.build_classifier(instancias)
        evaluation = Evaluation(instancias)
        # apply cross-validation
        rnd = Random(seed)
        rand_data = Instances.copy_instances(instancias)
        rand_data.randomize(rnd)
        if rand_data.class_attribute.is_nominal:
            rand_data.stratify(cross_validation)
        for i in range(cross_validation):
            # training instances for this fold
            train = instancias.train_cv(cross_validation, i)
            # test instances for this fold
            test = instancias.test_cv(cross_validation, i)

            # build and evaluate the classifier
            cls = Classifier.make_copy(classifier)
            cls.build_classifier(train)
            evaluation.test_model(cls, test)
        # create a new instance from the extracted features
        new_instance = Instance.create_instance(features)
        # add the new instance to the dataset
        instancias.add_instance(new_instance)
        # attach the new instance to the dataset the classifier was trained on
        new_instance.dataset = train
        # classify the new instance, yielding the probability of each class
        classification = classifier.distribution_for_instance(new_instance)
        result = {
            'cat': round(classification[0] * 100, 2),
            'dog': round(classification[1] * 100, 2)
        }
        print("=== Setup ===")
        print("Classifier: " + classifier.to_commandline())
        print("Dataset: " + instancias.relationname)
        print("Cross Validation: " + str(cross_validation) + " folds")
        print("Seed: " + str(seed))
        print("")
        print(
            evaluation.summary("=== " + str(cross_validation) +
                               " -fold Cross-Validation ==="))
        print("Classification", " - Cat: ", result['cat'], "  Dog: ",
              result['dog'])

        return result
# Example #18
    def copy(self, from_row=None, num_rows=None):
        return WekaInstances.copy_instances(self.instances,
                                            from_row=from_row,
                                            num_rows=num_rows)
import weka.core.jvm as jvm #weka requires java toolkit
import weka.core.converters as con #for converting the data set
from weka.clusterers import Clusterer #for clustering
from weka.classifiers import Classifier
from weka.core.dataset import Instances
from weka.core.dataset import Instance
from weka.classifiers import Evaluation, PredictionOutput
from weka.core.classes import JavaObject
import javabridge
import numpy
import random


jvm.start() #starting jvm
data = con.load_any_file("traffictrainroad1.arff") #to load the required file
data_copy = Instances.copy_instances(data)
test = con.load_any_file("traffictestroad1.arff")
test_copy = Instances.copy_instances(test)
test.delete_last_attribute()
data.class_is_last()
#separate_test = Instances.template_instances(test_copy)



class Instances(JavaObject):
    def __init__(self, jobject):
        super(Instances, self).__init__(jobject)
        # cache the attribute count of the underlying Java Instances object
        self.__num_attributes = javabridge.make_call(self.jobject, "numAttributes", "()I")

    def num_attributes(self):
        return self.__num_attributes
# Example #21
data_file = "/root/PycharmProjects/untitled/stuff/iris.arff"

helper.print_info("Loading dataset: " + data_file)
loader = Loader("weka.core.converters.ArffLoader")
data = loader.load_file(data_file)
data.class_is_last()

print(data)

classifier = Classifier(classname="weka.classifiers.trees.J48")

# randomize data
folds = 10
seed = 1
rnd = Random(seed)
rand_data = Instances.copy_instances(data)
rand_data.randomize(rnd)
if rand_data.class_attribute.is_nominal:
    rand_data.stratify(folds)

# perform cross-validation and add predictions
predicted_data = None
evaluation = Evaluation(rand_data)
for i in range(folds):
    train = rand_data.train_cv(folds, i)
    # the above code is used by the StratifiedRemoveFolds filter,
    # the following code is used by the Explorer/Experimenter
    # train = rand_data.train_cv(folds, i, rnd)
    test = rand_data.test_cv(folds, i)

    # build and evaluate classifier