def online_train(self, x, y):
    """Train the model one randomly-chosen sample per epoch (online rule).

    Each epoch performs a single forward/backward step on the next sample
    of a shuffled visiting order; when every sample has been visited the
    order is re-shuffled.  The full-dataset MSE is recorded per epoch in
    ``self.errors_per_epoch``.

    @param x: training inputs, shape (N, M)
    @param y: training targets, indexable by sample
    """
    n_samples, _ = x.shape
    order = permutation(n_samples)   # random visiting order over samples
    cursor = 0
    self.errors_per_epoch = []
    for _epoch in range(self.epochs):
        sample = x[order[cursor]]
        error = y[order[cursor]] - self.forward(sample)
        # track learning progress: MSE over the whole training set
        mse = metric(y, self.predict(x, test=False), types=['MSE']).calculate()['MSE']
        self.errors_per_epoch.append(mse)
        self.backward(error, sample, self.learning_rate)
        cursor += 1
        if cursor >= order.shape[0]:
            # every example has been visited: re-shuffle and reset the counter
            order = permutation(x.shape[0])
            cursor = 0
def k_fold_cross_validate(self, x, y, alphas):
    """Select the best learning rate for an adaline by 10-fold cross-validation.

    For each candidate rate an adaline is trained on K-1 folds and scored
    (MSE) on the held-out fold; the rate with the lowest mean MSE is kept.

    @param x: training inputs, shape (N, M)
    @param y: training targets (1-D; reshaped to a column vector here)
    @param alphas: candidate learning rates
    @return: None; winner stored in ``self.learning_rate``
    """
    N, M = x.shape
    K = 10          # number of folds (hoisted: invariant across alphas)
    L = int(N / K)  # fold size; any remainder samples never form a test fold
    y = np.array(y, ndmin=2).T
    validation_errors = []
    for alpha in alphas:
        fold_errors = []
        for esimo in range(1, K + 1):
            lo, hi = L * (esimo - 1), L * esimo  # test-fold boundaries
            x_train_val = (np.c_[x[:lo, :].T, x[hi:, :].T]).T
            x_test_val = x[lo:hi, :]
            y_train_val = (np.c_[y[:lo, :].T, y[hi:, :].T]).T
            y_test_val = y[lo:hi, :]
            classifier = adaline(learning_rate=alpha, epochs=self.epochs)
            classifier.weights = zeros((M, 1))
            classifier.online_train(x_train_val, y_train_val)
            y_out_val = classifier.predict(x_test_val, test=False)
            metric_results = metric(y_test_val, y_out_val, types=['MSE']).calculate()
            fold_errors.append(metric_results['MSE'])
        validation_errors.append(mean(fold_errors))
    # lowest mean validation MSE wins
    self.learning_rate = alphas[argmin(validation_errors)]
def k_fold_cross_validate(self, x, y, alphas, hidden):
    """Grid-search hidden-layer size and learning rate via 10-fold CV.

    Every (hidden_layer, alpha) pair is scored by mean micro-averaged
    accuracy over the folds; the best pair is stored on the instance.

    @param x: training inputs, shape (N, M)
    @param y: one-of-C encoded targets
    @param alphas: candidate learning rates
    @param hidden: candidate hidden-layer sizes
    @return: None; winners stored in ``self.learning_rate`` and
        ``self.hidden_layer_neurons``
    """
    K = 10
    N, M = x.shape
    L = int(N / K)  # fold size (hoisted: invariant across the whole grid)
    validation_accuracys = np.zeros((len(hidden), len(alphas)))
    y = array(y, ndmin=2)
    for i, hidden_layer in enumerate(hidden):
        for j, alpha in enumerate(alphas):
            fold_scores = []
            for esimo in range(1, K + 1):
                lo, hi = L * (esimo - 1), L * esimo  # test-fold boundaries
                x_train_val = (c_[x[:lo, :].T, x[hi:, :].T]).T
                x_test_val = x[lo:hi, :]
                y_train_val = (c_[y[:lo, :].T, y[hi:, :].T]).T
                y_test_val = y[lo:hi, :]
                n_tr, m_tr = x_train_val.shape
                classifier = multilayer_perceptron_network(
                    n_tr, m_tr,
                    hidden_layer_neurons=hidden_layer,
                    hidden_layers=1,
                    activation_function=self.activation_function,
                    # NOTE(review): hard-coded 1000; sibling validators use
                    # self.epochs — confirm this is intentional
                    epochs=1000,
                    case=self.case,
                    learning_rate=alpha)
                classifier.fit(x_train_val, y_train_val, validation=False, bias=False)
                y_out_val = out_of_c_to_label(classifier.predict(x_test_val, test=False))
                y_true_val = out_of_c_to_label(y_test_val)
                metric_results = metric(y_true_val, y_out_val,
                                        types=['ACCURACY']).calculate(average='micro')
                fold_scores.append(metric_results['ACCURACY'])
            validation_accuracys[i][j] = mean(fold_scores)
    # pick the (hidden, alpha) cell with the highest mean accuracy
    hidden_indice, alpha_indice = np.unravel_index(
        np.argmax(validation_accuracys, axis=None), validation_accuracys.shape)
    self.learning_rate = alphas[alpha_indice]
    self.hidden_layer_neurons = hidden[hidden_indice]
def k_fold_cross_validate(self, x, y, alphas):
    """Select the best learning rate by 10-fold cross-validation (grid search).

    A hyperbolic perceptron network is trained per fold and scored by
    macro-averaged accuracy; the rate with the highest mean accuracy wins.

    @param x: training inputs, shape (N, M)
    @param y: training targets (1-D; reshaped to a column vector here)
    @param alphas: candidate learning rates
    @return: None; winner stored in ``self.learning_rate``
    """
    N, M = x.shape
    K = 10          # number of folds (hoisted: invariant across alphas)
    L = int(N / K)  # fold size; remainder samples never form a test fold
    y = array(y, ndmin=2).T
    validation_accuracys = []
    for alpha in alphas:
        fold_scores = []
        for esimo in range(1, K + 1):
            lo, hi = L * (esimo - 1), L * esimo  # test-fold boundaries
            x_train_val = (c_[x[:lo, :].T, x[hi:, :].T]).T
            x_test_val = x[lo:hi, :]
            y_train_val = (c_[y[:lo, :].T, y[hi:, :].T]).T
            y_test_val = y[lo:hi, :]
            classifier = hyperbolic_perceptron_network(
                epochs=self.epochs,
                number_of_neurons=self.number_of_neurons,
                learning_rate=alpha,
                activation_function=self.activation_function)
            classifier.fit(x_train_val, y_train_val, validation=False, bias=False)
            y_out_val = classifier.predict(x_test_val, test=False)
            metric_results = metric(y_test_val.tolist(), y_out_val.tolist(),
                                    types=['ACCURACY']).calculate(average='macro')
            fold_scores.append(metric_results['ACCURACY'])
        validation_accuracys.append(mean(fold_scores))
    # highest mean validation accuracy wins
    self.learning_rate = alphas[argmax(validation_accuracys)]
def k_fold_cross_validate(self, x, y, alphas):
    """Select the best learning rate by 10-fold cross-validation.

    A simple perceptron network is trained per fold; outputs and targets
    are converted from one-of-C encoding to labels before scoring by
    micro-averaged accuracy.  The rate with the highest mean accuracy wins.

    @param x: training inputs, shape (N, M)
    @param y: one-of-C encoded targets
    @param alphas: candidate learning rates
    @return: None; winner stored in ``self.learning_rate``
    """
    N, M = x.shape
    K = 10          # number of folds (hoisted: invariant across alphas)
    L = int(N / K)  # fold size; remainder samples never form a test fold
    y = array(y, ndmin=2)
    validation_accuracys = []
    for alpha in alphas:
        fold_scores = []
        for esimo in range(1, K + 1):
            lo, hi = L * (esimo - 1), L * esimo  # test-fold boundaries
            x_train_val = (c_[x[:lo, :].T, x[hi:, :].T]).T
            x_test_val = x[lo:hi, :]
            y_train_val = (c_[y[:lo, :].T, y[hi:, :].T]).T
            y_test_val = y[lo:hi, :]
            classifier = simple_perceptron_network(
                epochs=self.epochs,
                number_of_neurons=self.number_of_neurons,
                learning_rate=alpha,
                activation_function=self.activation_function)
            classifier.fit(x_train_val, y_train_val, validation=False, bias=False)
            y_out_val = out_of_c_to_label(classifier.predict(x_test_val, test=False))
            y_true_val = out_of_c_to_label(y_test_val)
            metric_results = metric(y_true_val, y_out_val,
                                    types=['ACCURACY']).calculate(average='micro')
            fold_scores.append(metric_results['ACCURACY'])
        validation_accuracys.append(mean(fold_scores))
    # highest mean validation accuracy wins
    self.learning_rate = alphas[argmax(validation_accuracys)]
def fit(self, x_train, y_train, x_train_val=None, y_train_val=None,
        alphas=None, hidden=None, validation=True, bias=True):
    """Train the network, optionally tuning hyper-parameters first.

    When ``validation`` is true, 10-fold cross-validation on the
    validation split picks the learning rate / hidden size; otherwise
    validation is marked done.  Training then runs ``self.epochs``
    passes of per-sample backpropagation over a shuffled order.

    @param x_train: training inputs, one sample per row
    @param y_train: training targets, one sample per row
    @param x_train_val: inputs for hyper-parameter search (if validating)
    @param y_train_val: targets for hyper-parameter search (if validating)
    @param alphas: candidate learning rates for the search
    @param hidden: candidate hidden-layer sizes for the search
    @param validation: run cross-validation before training
    @param bias: prepend a bias column to ``x_train``
    """
    if validation:
        self.k_fold_cross_validate(x_train_val, y_train_val,
                                   alphas=alphas, hidden=hidden, bias=True)
    else:
        self.done_validation = True
    if bias:
        x_train = self.add_bias(x_train)
    self.init_weigths()
    for _ in range(self.epochs):
        # one stochastic pass over a fresh shuffle of the training set
        for idx in np.random.permutation(x_train.shape[0]):
            pattern = np.array(x_train[idx], ndmin=2).T  # column vector (p x 1)
            target = np.array(y_train[idx], ndmin=2).T   # column vector (c x 1)
            hidden_out, net_out = self.foward(pattern)
            # propagate the output error back through the hidden layer
            self.backward(target - net_out, net_out, hidden_out, pattern)
        if self.key and self.done_validation:
            # record the epoch-level RMSE over the whole training set
            predicted = self.predict(x_train)
            metric_results = metric(predicted, y_train,
                                    types=['RMSE']).calculate(average='micro')
            self.train_epochs_error.append(metric_results['RMSE'])
y_train_val = train_val[:, 4:] x_test = test[:, :4] y_test = test[:, 4:] validation_alphas = [0.15] hidden = 3 * np.arange(1, 5) simple_net = MultiLayerPerceptron(M, C, epochs=10000) simple_net.fit(x_train, y_train, x_train_val=x_train_val, y_train_val=y_train_val, alphas=validation_alphas, hidden=hidden) y_out_simple_net = simple_net.predict(x_test, bias=True) y_test = simple_net.predicao(y_test) metrics_calculator = metric(y_test, y_out_simple_net, types=['ACCURACY', 'precision', 'recall', 'f1_score']) metric_results = metrics_calculator.calculate(average='macro') print(metric_results) results['cf'].append((metric_results['ACCURACY'], metrics_calculator.confusion_matrix(list(y_test), y_out_simple_net, labels=[0,1,2]))) results['alphas'].append(simple_net.learning_rate) results['realization'].append(realization) for type in ['ACCURACY', 'precision', 'recall', 'f1_score']: results[type].append(metric_results[type]) results['cf'].sort(key=lambda x: x[0], reverse=True) final_result['best_cf'].append(results['cf'][0][1]) final_result['alphas'].append(mean(results['alphas']))
# Run 20 independent k-NN experiments: each realization takes a fresh random
# 80/20 split of `base`, fits 3-NN, and records accuracy/precision/recall/f1
# plus the confusion matrix into the `results` accumulator (defined earlier).
C = [0, 1]  # binary class labels used for the confusion matrix
for realization in range(20):
    train, test = split_random(base, train_percentage=.8)
    # first two columns are features, third is the label
    x_train = train[:, :2]
    y_train = train[:, 2]
    x_test = test[:, :2]
    y_test = test[:, 2]
    classifier_knn = knn(x_train, y_train, k=3)
    y_out_knn = classifier_knn.predict(x_test)
    metrics_calculator = metric(
        list(y_test), y_out_knn,
        types=['ACCURACY', 'precision', 'recall', 'f1_score'])
    metric_results = metrics_calculator.calculate(average='micro')
    # keep (accuracy, confusion matrix, fitted model) so the best run can
    # be recovered after sorting
    results['cf'].append(
        (metric_results['ACCURACY'],
         metrics_calculator.confusion_matrix(list(y_test), y_out_knn,
                                             labels=[0, 1]),
         classifier_knn))
    results['realization'].append(realization)
    for type in ['ACCURACY', 'precision', 'recall', 'f1_score']:
        results[type].append(metric_results[type])
# rank realizations by accuracy, best first
# NOTE(review): indentation reconstructed from a flattened source — the sort
# placement (after the loop) yields the same final list either way
results['cf'].sort(key=lambda x: x[0], reverse=True)
train, test = split_random(base, train_percentage=.8) train = train.to_numpy() test = test.to_numpy() x_train = train[:, :2] y_train = train[:, 2:] x_test = test[:, :2] y_test = test[:, 2:] classifier_knn = knn(x_train, y_train) y_out_knn = classifier_knn.predict(x_test) metrics_calculator = metric(list(y_test.reshape(y_test.shape[0])), y_out_knn, types=['ACCURACY', 'AUC', 'precision', 'recall', 'f1_score']) metric_results = metrics_calculator.calculate(average='macro') results['cf'].append((metric_results['ACCURACY'], metrics_calculator.confusion_matrix(list(y_test.reshape(y_test.shape[0])), y_out_knn, labels=range(C)), classifier_knn )) results['realization'].append(realization) for type in ['ACCURACY', 'precision', 'recall', 'f1_score']: results[type].append(metric_results[type]) for type in ['ACCURACY', 'precision', 'recall', 'f1_score']: final_result[type].append(mean(results[type]))
y_test = test[different_target] y_test.to_numpy().reshape(y_test.shape[0], 1) # ---------------------------------- modeling ---------------------------------------------- validation_alphas = [0.15] hidden = 4 * np.arange(1, 5) simple_net = MultiLayerPerceptron(9, C, epochs=1000, Regressao=True, hidden_layer_neurons=12, learning_rate=0.15) simple_net.fit(x_train.to_numpy(), y_train, x_train_val=[], y_train_val=[], alphas=validation_alphas, hidden=hidden, validation=False) y_out = simple_net.predict(x_test, bias=True) metrics_calculator = metric(y_test, y_out, types=['MSE', 'RMSE']) metric_results = metrics_calculator.calculate() print(metric_results) results['alphas'].append(simple_net.lr) results['realization'].append(realization) for type in ['MSE', 'RMSE']: results[type].append(metric_results[type]) final_result['alphas'].append(mean(results['alphas'])) for type in ['MSE', 'RMSE']: final_result[type].append(mean(results[type])) final_result['std ' + type].append(std(results[type])) print(pd.DataFrame(final_result)) pd.DataFrame(final_result).to_csv(
x_test = test[:, :2] y_test = test[:, 2] regressor_adaline = adaline(epochs=1000, learning_rate=0.01) regressor_adaline.fit(x_train, y_train, x_train_val, y_train_val, alphas=validation_alphas) y_out_adaline = regressor_adaline.predict(x_test) metrics_calculator = metric(list(y_test), y_out_adaline, types=['MSE', 'RMSE']) metric_results = metrics_calculator.calculate() results['erros'].append( (metric_results['MSE'], regressor_adaline.errors_per_epoch)) results['alphas'].append(regressor_adaline.learning_rate) results['realization'].append(realization) for type in ['MSE', 'RMSE']: results[type].append(metric_results[type]) final_result['alphas'].append(mean(results['alphas'])) for type in ['MSE', 'RMSE']: final_result[type].append(mean(results[type])) final_result['std ' + type].append(std(results[type]))
x_test = test.drop(['Species'], axis=1) y_test = test['Species'] classifier_perceptron = perceptron(epochs=1000, learning_rate=0.01) classifier_perceptron.fit(x_train.to_numpy(), y_train.to_numpy(), x_train_val.to_numpy(), y_train_val.to_numpy(), alphas=validation_alphas) y_out_perceptron = classifier_perceptron.predict(x_test.to_numpy()) metrics_calculator = metric(list(y_test), y_out_perceptron, types=[ 'ACCURACY', 'AUC', 'precision', 'recall', 'f1_score', 'MCC' ]) metric_results = metrics_calculator.calculate() results['cf'].append( (metric_results['ACCURACY'], metrics_calculator.confusion_matrix(list(y_test), y_out_perceptron))) results['erros'].append((metric_results['ACCURACY'], classifier_perceptron.errors_per_epoch)) results['alphas'].append(classifier_perceptron.learning_rate) results['realization'].append(realization) for type in [ 'ACCURACY', 'AUC', 'precision', 'recall', 'f1_score', 'MCC' ]:
def k_fold_cross_validate(self, x, y, hidden, bias=True):
    """Choose the ELM hidden-layer size by 10-fold cross-validation.

    Scores each candidate size by the metric configured for
    ``self.case`` (maximized for classification, minimized for
    regression) and stores the winner in ``self.number_of_neurons``.

    @param x: training inputs, shape (N, M)
    @param y: training targets (row vector accepted; transposed to column)
    @param hidden: candidate hidden-layer sizes
    @param bias: prepend a bias column to ``x`` before splitting
    @return: None
    @raise ValueError: if ``self.case`` is neither 'classification' nor
        'regression' (previously this fell through to a NameError)
    """
    if bias:
        x = self.add_bias(x)
    K = 10
    N, M = x.shape
    L = int(N / K)  # fold size (hoisted: invariant across candidates)
    y = np.array(y, ndmin=2)
    if y.shape[0] == 1:
        y = y.T
    validation_metrics = np.zeros(len(hidden))
    for i, hidden_layer in enumerate(hidden):
        fold_scores = []
        for esimo in range(1, K + 1):
            lo, hi = L * (esimo - 1), L * esimo  # test-fold boundaries
            x_train_val = (c_[x[:lo, :].T, x[hi:, :].T]).T
            x_test_val = x[lo:hi, :]
            y_train_val = (c_[y[:lo, :].T, y[hi:, :].T]).T
            y_test_val = y[lo:hi, :]
            classifier = ExtremeLearningMachines(
                number_of_neurons=hidden_layer, N_Classes=self.N_Classes)
            classifier.fit(x_train_val, y_train_val,
                           x_train_val=x_train_val, y_train_val=y_train_val,
                           validation=False)
            y_out_val = classifier.predict(x_test_val)
            if self.case == 'classification':
                # convert one-of-C targets to labels before scoring
                y_test_val = classifier.predicao(y_test_val)
            metric_results = metric(y_test_val, y_out_val,
                                    types=[self.types[self.case]]).calculate(average='micro')
            fold_scores.append(metric_results[self.types[self.case]])
        validation_metrics[i] = mean(fold_scores)
    if self.case == 'classification':
        best = int(np.argmax(validation_metrics))  # maximize accuracy
    elif self.case == 'regression':
        best = int(np.argmin(validation_metrics))  # minimize error
    else:
        raise ValueError(f"unknown case: {self.case!r}")
    self.number_of_neurons = hidden[best]
    print(self.number_of_neurons)  # NOTE(review): debug leftover, kept as-is
def k_fold_cross_validate(self, x, y, alphas, hidden, bias=True):
    """Grid-search MLP hidden size and learning rate via 10-fold CV.

    Each (hidden_layer, alpha) pair is scored by the metric configured
    for ``self.case`` (maximized for classification, minimized for
    regression); the winners are stored in ``self.lr`` and
    ``self.N_Neruronios`` and ``self.done_validation`` is set.

    @param x: training inputs, shape (N, M)
    @param y: training targets (row vector accepted; transposed to column)
    @param alphas: candidate learning rates
    @param hidden: candidate hidden-layer sizes
    @param bias: prepend a bias column to ``x`` before splitting
    @return: None
    @raise ValueError: if ``self.case`` is neither 'classification' nor
        'regression' (previously this fell through to a NameError)
    """
    if bias:
        x = self.add_bias(x)
    K = 10
    N, M = x.shape
    L = int(N / K)  # fold size (hoisted: invariant across the grid)
    validation_metrics = np.zeros((len(hidden), len(alphas)))
    y = np.array(y, ndmin=2)
    if y.shape[0] == 1:
        y = y.T
    for i, hidden_layer in enumerate(hidden):
        for j, alpha in enumerate(alphas):
            fold_scores = []
            for esimo in range(1, K + 1):
                lo, hi = L * (esimo - 1), L * esimo  # test-fold boundaries
                x_train_val = (c_[x[:lo, :].T, x[hi:, :].T]).T
                x_test_val = x[lo:hi, :]
                y_train_val = (c_[y[:lo, :].T, y[hi:, :].T]).T
                y_test_val = y[lo:hi, :]
                n_tr, m_tr = x_train_val.shape
                classifier = MultiLayerPerceptron(
                    m_tr,
                    self.N_Classes,
                    hidden_layer_neurons=hidden_layer,
                    learning_rate=alpha,
                    epochs=500,
                    Regressao=self.key)
                classifier.fit(x_train_val, y_train_val,
                               validation=False, bias=False)
                y_out_val = classifier.predict(x_test_val)
                if self.case == 'classification':
                    # convert one-of-C targets to labels before scoring
                    y_test_val = classifier.predicao(y_test_val)
                metric_results = metric(
                    y_test_val, y_out_val,
                    types=[self.types[self.case]]).calculate(average='micro')
                fold_scores.append(metric_results[self.types[self.case]])
            validation_metrics[i][j] = mean(fold_scores)
    if self.case == 'classification':
        hidden_indice, alpha_indice = np.unravel_index(
            np.argmax(validation_metrics, axis=None), validation_metrics.shape)
    elif self.case == 'regression':
        hidden_indice, alpha_indice = np.unravel_index(
            np.argmin(validation_metrics, axis=None), validation_metrics.shape)
    else:
        raise ValueError(f"unknown case: {self.case!r}")
    self.lr = alphas[alpha_indice]
    # attribute name kept as-is (external code may read it), typo included
    self.N_Neruronios = hidden[hidden_indice]
    self.done_validation = True