def gridsearch_reservoir_computing(self, train_X, train_y, test_X, test_y,
                                   per_time_step=False, error='mse',
                                   gridsearch_training_frac=0.7):
    """Grid search over reservoir-computing hyperparameters.

    Splits the training data chronologically (no shuffling — the data is
    treated as an ordered time series) into a fit part and a validation
    part, trains a reservoir for every combination of the tuned
    parameters, and returns the combination with the best validation
    score.

    :param train_X: training feature DataFrame.
    :param train_y: training target DataFrame/Series.
    :param test_X: unused here; kept for interface symmetry with the
        other gridsearch_* methods.
    :param test_y: unused here; kept for interface symmetry.
    :param per_time_step: passed through to reservoir_computing.
    :param error: 'mse' (minimized, uses predicted probabilities) or
        'accuracy' (maximized, uses predicted labels).
    :param gridsearch_training_frac: fraction of the training rows used
        to fit each candidate; the remainder is the validation set.
    :return: tuple (best reservoir_size, best a).
    """
    tuned_parameters = {
        'a': [0.6, 0.8],
        'reservoir_size': [400, 700, 1000]
    }
    # Materialize the key order once: in Python 3 dict.keys() is a view
    # without .index(), and we index combinations by key position below.
    keys = list(tuned_parameters.keys())
    combinations = self.generate_parameter_combinations(
        tuned_parameters, keys)

    # Positional, chronological split (.iloc — DataFrame.ix was removed
    # from pandas).
    split_point = int(gridsearch_training_frac * len(train_X.index))
    train_params_X = train_X.iloc[0:split_point, ]
    test_params_X = train_X.iloc[split_point:len(train_X.index), ]
    train_params_y = train_y.iloc[0:split_point, ]
    test_params_y = train_y.iloc[split_point:len(train_X.index), ]

    # Initialize "best so far" for the chosen metric's direction.
    if error == 'mse':
        best_error = sys.float_info.max
    elif error == 'accuracy':
        best_error = 0

    best_combination = []
    for comb in combinations:
        print(comb)
        pred_train_y, pred_test_y, pred_train_y_prob, pred_test_y_prob = self.reservoir_computing(
            train_params_X, train_params_y, test_params_X, test_params_y,
            reservoir_size=comb[keys.index('reservoir_size')],
            a=comb[keys.index('a')],
            per_time_step=per_time_step, gridsearch=False)
        if error == 'mse':
            evaluator = RegressionEvaluation()
            mse = evaluator.mean_squared_error(
                test_params_y, pred_test_y_prob)
            if mse < best_error:
                best_error = mse
                best_combination = comb
        elif error == 'accuracy':
            evaluator = ClassificationEvaluation()
            acc = evaluator.accuracy(test_params_y, pred_test_y)
            if acc > best_error:
                best_error = acc
                best_combination = comb

    print('-------')
    print(best_combination)
    print('-------')
    return best_combination[keys.index(
        'reservoir_size')], best_combination[keys.index('a')]
def gridsearch_recurrent_neural_network(self, train_X, train_y, test_X, test_y,
                                        error='accuracy',
                                        gridsearch_training_frac=0.7):
    """Grid search over recurrent-neural-network hyperparameters.

    Splits the training data chronologically (no shuffling — the data is
    treated as an ordered time series) into a fit part and a validation
    part, trains an RNN for every combination of the tuned parameters,
    and returns the combination with the best validation score.

    :param train_X: training feature DataFrame.
    :param train_y: training target DataFrame/Series.
    :param test_X: unused here; kept for interface symmetry with the
        other gridsearch_* methods.
    :param test_y: unused here; kept for interface symmetry.
    :param error: 'mse' (minimized, uses predicted probabilities) or
        'accuracy' (maximized, uses predicted labels).
    :param gridsearch_training_frac: fraction of the training rows used
        to fit each candidate; the remainder is the validation set.
    :return: tuple (best n_hidden_neurons, best iterations,
        best outputbias).
    """
    tuned_parameters = {
        'n_hidden_neurons': [50, 100],
        'iterations': [250, 500],
        'outputbias': [True]
    }
    # Fix the key order once; combinations and the return value below
    # both index by position in this list.
    params = list(tuned_parameters.keys())
    combinations = self.generate_parameter_combinations(
        tuned_parameters, params)

    # Positional, chronological split.
    split_point = int(gridsearch_training_frac * len(train_X.index))
    train_params_X = train_X.iloc[0:split_point, ]
    test_params_X = train_X.iloc[split_point:len(train_X.index), ]
    train_params_y = train_y.iloc[0:split_point, ]
    test_params_y = train_y.iloc[split_point:len(train_X.index), ]

    # Initialize "best so far" for the chosen metric's direction.
    if error == 'mse':
        best_error = sys.float_info.max
    elif error == 'accuracy':
        best_error = 0

    best_combination = []
    for comb in combinations:
        print(comb)
        pred_train_y, pred_test_y, pred_train_y_prob, pred_test_y_prob = self.recurrent_neural_network(
            train_params_X, train_params_y, test_params_X, test_params_y,
            n_hidden_neurons=comb[params.index('n_hidden_neurons')],
            iterations=comb[params.index('iterations')],
            outputbias=comb[params.index('outputbias')],
            gridsearch=False)
        if error == 'mse':
            evaluator = RegressionEvaluation()
            mse = evaluator.mean_squared_error(
                test_params_y, pred_test_y_prob)
            if mse < best_error:
                best_error = mse
                best_combination = comb
        elif error == 'accuracy':
            evaluator = ClassificationEvaluation()
            acc = evaluator.accuracy(test_params_y, pred_test_y)
            if acc > best_error:
                best_error = acc
                best_combination = comb

    print('-------')
    print(best_combination)
    print('-------')
    return best_combination[params.index(
        'n_hidden_neurons')], best_combination[params.index(
            'iterations')], best_combination[params.index('outputbias')]