def our_avg_run(avg_num_of_run, filename):
    """Train and evaluate a PerceptronClassifier over repeated random splits.

    Loads *filename* once, then for each of *avg_num_of_run* runs splits the
    dataset into training/testing portions, fits a fresh perceptron on the
    training portion, and records the classification error on both portions.

    Args:
        avg_num_of_run: number of independent train/test runs to average over;
            must be a positive integer.
        filename: path passed through to load_dataset().

    Returns:
        Tuple (mean_testing_error, mean_training_error), each averaged over
        all runs.

    Raises:
        ValueError: if avg_num_of_run is not positive (previously this fell
            through to a ZeroDivisionError at the final averaging step).
    """
    if avg_num_of_run <= 0:
        raise ValueError("avg_num_of_run must be a positive integer")

    dataset = load_dataset(filename)
    ptraining_error = []
    perceptron_error = []
    for _ in range(avg_num_of_run):
        (training_set, testing_set) = split_dataset(dataset, PROBABILITY_TRAINING_SET)
        # BUG FIX: the original immediately re-assigned `testing_set = dataset`,
        # discarding the held-out split and evaluating on the FULL dataset —
        # including the very instances the model was trained on — which leaks
        # training data into the "testing" error. Evaluate on the held-out
        # split instead.
        if IS_VERBOSE:
            print("training set size: %s testing set size: %s num instances: %s"
                  % (len(training_set), len(testing_set), len(dataset)))
        (train_x, train_y) = split_attribute_and_label(training_set)
        (test_x, test_y) = split_attribute_and_label(testing_set)
        p = PerceptronClassifier(ETA, THRESHOLD, UPPER_BOUND, False)
        p.fit(train_x, train_y)
        # Training error: how well the fitted model reproduces its own data.
        t_result_list = p.predict(train_x)
        ptraining_error.append(calculate_error(train_y, t_result_list))
        # Testing error: generalization to the held-out split.
        result_list = p.predict(test_x)
        perceptron_error.append(calculate_error(test_y, result_list))
        if IS_VERBOSE:
            # Debug dump of the learned weight vector; the original printed
            # this unconditionally, spamming non-verbose runs.
            print(p.weights)
    return (sum(perceptron_error) / len(perceptron_error),
            sum(ptraining_error) / len(ptraining_error))