def test_two(in_sample, out_sample):
    # Fit a linear model on a random target and report in- and out-of-sample error.
    target_function = random_target_function()
    training_set = random_set(in_sample, target_function)
    weight = linear_percepton(training_set.z, training_set.y)
    in_error = weight_error(weight, training_set.z, training_set.y)
    testing_set = random_set(out_sample, target_function)
    out_error = weight_error(weight, testing_set.z, testing_set.y)
    return in_error, out_error
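# The helpers used above (linear_percepton, weight_error) are defined elsewhere in
# the repository; the sketch below is only a hypothetical reconstruction of what
# they are assumed to do: a least-squares fit via the pseudoinverse and the
# fraction of misclassified points. The *_sketch names are illustrative, not the
# real ones.
import numpy as np

def linear_percepton_sketch(z, y):
    # w = pinv(Z) y, the ordinary least-squares solution
    return np.linalg.pinv(z) @ y

def weight_error_sketch(weight, z, y):
    # fraction of points whose predicted sign disagrees with the label
    return np.mean(np.sign(z @ weight) != y)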
def test_four(in_sample, out_sample):
    # Train on the moved_circle target with 10% of the labels flipped as noise.
    training_set = random_set(in_sample, moved_circle)
    noisy_indices = np.random.choice(in_sample, size=round(0.1 * in_sample), replace=False)
    training_set.y[noisy_indices] *= -1
    weight = linear_percepton(training_set.z, training_set.y)
    in_error_no_transform = weight_error(weight, training_set.z, training_set.y)
    # Refit after mapping the features through the second-order transform.
    training_set.z = second_order(training_set.z)
    weight = linear_percepton(training_set.z, training_set.y)
    in_error_transform = weight_error(weight, training_set.z, training_set.y)
    # The test set is generated already transformed, with the same 10% label noise.
    testing_set = random_set(out_sample, moved_circle, second_order)
    noisy_indices = np.random.choice(out_sample, size=round(0.1 * out_sample), replace=False)
    testing_set.y[noisy_indices] *= -1
    out_error_transform = weight_error(weight, testing_set.z, testing_set.y)
    return in_error_no_transform, weight, out_error_transform
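# second_order is assumed (not confirmed by this file) to be the usual quadratic
# feature map applied to z = [1, x1, x2]; a minimal sketch under that assumption:
import numpy as np

def second_order_sketch(z):
    x1, x2 = z[:, 1], z[:, 2]
    return np.column_stack([np.ones(len(z)), x1, x2, x1 * x2, x1 ** 2, x2 ** 2])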
def test1(training_data, testing_data):
    training_set = DataML(training_data, transform)
    weight = linear_percepton(training_set.z, training_set.y)
    testing_set = DataML(testing_data, transform)
    in_error, out_error = [
        weight_error(weight, data_set.z, data_set.y)
        for data_set in [training_set, testing_set]
    ]
    return in_error, out_error
def trial(in_sample, out_sample):
    target_function = random_target_function()
    training_set = random_set(in_sample, target_function)
    initial_weight = np.zeros(len(training_set.x[0]))
    weight, iterations = pla(training_set.z, training_set.y, initial_weight, True)
    testing_set = random_set(out_sample, target_function)
    out_error = weight_error(weight, testing_set.z, testing_set.y)
    return out_error, iterations
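# A hypothetical driver for the trial above, averaging PLA results over repeated
# runs; run_count, in_sample and out_sample are illustrative values only.
def average_pla_trials(run_count=1000, in_sample=10, out_sample=1000):
    results = [trial(in_sample, out_sample) for _ in range(run_count)]
    errors, iterations = zip(*results)
    return sum(errors) / run_count, sum(iterations) / run_count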
def best_model(model_weights, testing_set):
    errors = [
        weight_error(weight, testing_set.z[:, :len(weight)], testing_set.y)
        for weight in model_weights
    ]
    # Return the k value that yields the least error (see k_values), plus all errors.
    return len(model_weights[np.argmin(errors)]) - 1, errors
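# Hypothetical usage sketch: fit one weight vector per candidate k on the first
# k + 1 transformed columns, then let best_model pick the k with the lowest
# validation error. The k_values default and the fitting loop are assumptions,
# not the repository's actual selection code.
def select_k_sketch(training_set, validation_set, k_values=(3, 4, 5, 6, 7)):
    model_weights = [
        linear_percepton(training_set.z[:, :k + 1], training_set.y)
        for k in k_values
    ]
    return best_model(model_weights, validation_set)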
def trial(training_data, testing_data, a):
    training_set = DataML(training_data, transform)
    weights = minimize_error_aug(training_set.z, training_set.y, a)
    in_error, out_error = [
        weight_error(weights, tset.z, tset.y)
        for tset in [training_set, DataML(testing_data, transform)]
    ]
    return in_error, out_error
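# minimize_error_aug is assumed to minimize the augmented (weight-decay) error,
# i.e. w = (Z^T Z + a*I)^(-1) Z^T y; a minimal sketch under that assumption:
import numpy as np

def minimize_error_aug_sketch(z, y, a):
    # least squares with an L2 penalty of strength a on the weights
    return np.linalg.solve(z.T @ z + a * np.identity(z.shape[1]), z.T @ y)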
def myTrial(in_sample, out_sample):
    target_function = random_target_function()  # w0 is added by default
    training_set = random_set(in_sample, target_function)
    initial_weight = np.zeros(len(training_set.x[0]))
    weight, iterations = myOwnPlaImplementation(training_set.z, training_set.y, initial_weight)
    testing_set = random_set(out_sample, target_function)
    out_error = weight_error(weight, testing_set.z, testing_set.y)
    return out_error, iterations
def trial(in_sample, out_of_sample):
    target_function = random_target_function()
    training_set = random_set(in_sample, target_function)
    pla_weight = pla(training_set.z, training_set.y)
    svm_weight = svm(training_set.z, training_set.y)
    testing_set = random_set(out_of_sample, target_function)
    pla_error, svm_error = [
        weight_error(weight, testing_set.z, testing_set.y)
        for weight in [pla_weight, svm_weight]
    ]
    # 1 when the SVM generalizes better than PLA on this trial, else 0.
    svm_better = 1 if svm_error < pla_error else 0
    # Count the components of svm_weight above a small (10**-3) threshold.
    total_support_vectors = sum(1 for x in svm_weight if x >= 10 ** -3)
    return svm_better, total_support_vectors
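# Hypothetical driver for the comparison above: the fraction of runs where the
# SVM beats PLA out of sample, plus the average support-vector count. The
# run_count and sample sizes are illustrative values only.
def compare_pla_svm(run_count=1000, in_sample=10, out_of_sample=1000):
    results = [trial(in_sample, out_of_sample) for _ in range(run_count)]
    svm_better_flags, support_vector_counts = zip(*results)
    return (sum(svm_better_flags) / run_count,
            sum(support_vector_counts) / run_count)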
def train_test(training_set, testing_set, learn, learn_args=()):
    weight = learn(training_set.z, training_set.y, *learn_args)
    in_sample_error = weight_error(weight, training_set.z, training_set.y)
    out_of_sample_error = weight_error(weight, testing_set.z, testing_set.y)
    return [in_sample_error, out_of_sample_error]
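# Hypothetical usage of train_test: it accepts any learner with a
# learn(z, y, *args) signature that returns a single weight vector. Passing
# minimize_error_aug with a=1.0 is purely illustrative.
def train_test_examples_sketch(training_set, testing_set, a=1.0):
    regression_errors = train_test(training_set, testing_set, linear_percepton)
    regularized_errors = train_test(training_set, testing_set, minimize_error_aug, [a])
    return regression_errors, regularized_errors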