# Iris classification with a multi-layer perceptron, evaluated over 20 random
# train/validation/test realizations.
#
# FIX(review): this fragment had been collapsed onto a single physical line, so
# everything after the first '#' comment was dead code. Restored to one
# statement per line so the whole pipeline actually executes.

base = load_base(path='iris.data', type='csv')

# Normalize the four feature columns to [0, 1] (min-max scaling).
base[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']] = normalization(
    base[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']],
    type='min-max')

# NOTE(review): M is taken from base.shape *before* 'Species' is dropped, so it
# counts the label column as well as the features — confirm MultiLayerPerceptron
# really expects this rather than the number of input features (4).
N, M = base.shape
C = len(base['Species'].unique())  # number of classes

# One-hot encode the labels, then rebuild `base` as [4 features | C dummies].
y_out_of_c = pd.get_dummies(base['Species'])
base = base.drop(['Species'], axis=1)
# NOTE(review): `concatenate` is a project helper (not np.concatenate by that
# exact usage pattern here) — presumably column-wise concat; verify.
base = concatenate(
    [base[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']], y_out_of_c],
    axis=1)

for realization in range(20):
    # 80/20 train/test, then 80/20 train/validation inside the training part.
    train, test = split_random(base, train_percentage=.8)
    train, train_val = split_random(train, train_percentage=.8)

    # Columns 0..3 are features, columns 4.. are the one-hot labels.
    x_train = train[:, :4]
    y_train = train[:, 4:]
    x_train_val = train_val[:, :4]
    y_train_val = train_val[:, 4:]
    x_test = test[:, :4]
    y_test = test[:, 4:]

    # Candidate learning rates and hidden-layer sizes ([3, 6, 9, 12]) for
    # validation-based model selection inside fit().
    validation_alphas = [0.15]
    hidden = 3 * np.arange(1, 5)

    simple_net = MultiLayerPerceptron(M, C, epochs=10000)
    simple_net.fit(x_train, y_train,
                   x_train_val=x_train_val,
                   y_train_val=y_train_val,
                   alphas=validation_alphas,
                   hidden=hidden)
# NOTE(review): collapsed/truncated fragment — a kNN (k=3) evaluation loop over
# 20 random train/test splits of the iris base, collecting accuracy/F1/
# precision/recall/confusion-matrix per realization. The fragment is cut off
# mid-call to `metric(...)`, and because it was collapsed onto one physical
# line starting with '#', the entire line is currently a comment (dead code).
# Restore the original line breaks from the source file before relying on it.
# ----------------------------------------------------------------------------------------------- # accuracys = [] results = { 'versus': [], 'realization': [], 'ACCURACY': [], 'f1_score': [], 'precision': [], 'recall': [], 'cf': [] } C = len(iris_base['Species'].unique()) for realization in range(20): train, test = split_random(iris_base) x_train = train.drop(['Species'], axis=1) y_train = train['Species'] x_test = test.drop(['Species'], axis=1) y_test = test['Species'] classifier_knn = knn(x_train.to_numpy(), y_train.to_numpy(), k=3, class_column_name='Species') y_out_knn = classifier_knn.predict(x_test.to_numpy()) metrics_calculator = metric( list(y_test),
# NOTE(review): collapsed fragment truncated at BOTH ends — it begins inside a
# results-dict literal (the `{` is outside this view) and ends mid-call to
# `classifier_perceptron.fit(...)`. Visible logic: 20 realizations of an
# 80/20 train/test split plus a 70/30 train/validation split, with a
# perceptron (1000 epochs, lr=0.01) selected over 20 learning rates in
# linspace(0.015, 0.1). Not valid Python as it stands; recover the original
# line breaks and surrounding lines from the source file.
'versus': [], 'realization': [], 'ACCURACY': [], 'AUC': [], 'MCC': [], 'f1_score': [], 'precision': [], 'recall': [], 'alphas': [], 'cf': [], 'erros': [] } validation_alphas = linspace(0.015, 0.1, 20) for realization in range(20): train, test = split_random(iris_base, train_percentage=0.8) train, train_val = split_random(train, train_percentage=0.7) x_train = train.drop(['Species'], axis=1) y_train = train['Species'] x_train_val = train_val.drop(['Species'], axis=1) y_train_val = train_val['Species'] x_test = test.drop(['Species'], axis=1) y_test = test['Species'] classifier_perceptron = perceptron(epochs=1000, learning_rate=0.01) classifier_perceptron.fit(x_train.to_numpy(), y_train.to_numpy(),
# Regression experiment scaffolding: metric accumulators plus 5 realizations of
# an 80/20 train/test split for target `different_target`.
#
# FIX(review): this fragment had been collapsed onto one physical line, which
# was a syntax error (`} results = {` on a single line) and left everything
# after the first '#' dead. Restored to one statement per line.
#
# NOTE(review): the loop body visible here only prepares the splits — model
# fitting and the code that populates these dicts presumably follow in a part
# of the file not shown.

# Summary statistics aggregated across realizations.
final_result = {
    'MSE': [],
    'std MSE': [],
    'RMSE': [],
    'std RMSE': [],
    'alphas': []
}

# Per-realization metrics.
results = {
    'realization': [],
    'MSE': [],
    'RMSE': [],
    'alphas': []
}

for realization in range(5):
    train, test = split_random(df, train_percentage=.8)
    # Validation split deliberately disabled in this variant:
    # train, train_val = split_random(train, train_percentage=.8)

    # ------------------------------ x and y for training -----------------------------------
    x_train = train[features]
    # Reshape target to an (n_samples, 1) column vector.
    y_train = train[different_target].to_numpy().reshape(train[different_target].shape[0], 1)

    # ------------------------------ x and y for validation ---------------------------------
    # x_train_val = train_val[features]
    # y_train_val = train_val[different_target]

    # ------------------------------ x and y for test ---------------------------------------
    x_test = test[features]
    y_test = test[different_target]
# NOTE(review): collapsed/truncated fragment — fills missing 'temp_inside'
# values with the column mean, min-max-normalizes the features, then runs 20
# realizations of 80/20 train/test (plus 80/20 train/validation) splits for a
# RadialBasisFunction regressor (C = 1 output, i.e. regression). The fragment
# is cut off mid-call to `RadialBasisFunction(...)`, and the single-line
# collapse makes everything after the first '#' a comment and the
# `...mean()) print(...)` juxtaposition a syntax error. Restore the original
# line breaks from the source file before relying on it.
new_df['temp_inside'] = new_df['temp_inside'].fillna( new_df['temp_inside'].mean()) print(new_df.info()) # --------------------------------------------------------------------------------------------- # normalizar a base new_df[features] = normalization(new_df[features], type='min-max') # -------------------------------------------------------------------------------------------- N, M = new_df.shape C = 1 # Problema de regressão for realization in range(20): train, test = split_random(new_df, train_percentage=.8) train, train_val = split_random(train, train_percentage=.8) x_train = train[features] y_train = train[target] x_train_val = train_val[features] y_train_val = train_val[target] x_test = test[features] y_test = test[target] validation_alphas = [1.0, 1.5, 2.0] hidden = [10, 15, 20] simple_net = RadialBasisFunction(number_of_neurons=15, N_Classes=1,