axis=1)  # delete variables that contain NaN
# delete explanatory variables whose standard deviation is 0
std_0_variable_flags = original_x.std() == 0
x = original_x.drop(original_x.columns[std_0_variable_flags], axis=1)
variables = pd.concat([y, x], axis=1)
numbers_of_x = np.arange(numbers_of_y[-1] + 1, variables.shape[1])

# standardize x and y
autoscaled_variables = (variables - variables.mean(axis=0)) / variables.std(axis=0, ddof=1)
autoscaled_target_y_value = (target_y_value - variables.mean(axis=0)[numbers_of_y]) \
    / variables.std(axis=0, ddof=1)[numbers_of_y]

# construct GTMR model
model = GTM(shape_of_map, shape_of_rbf_centers, variance_of_rbfs,
            lambda_in_em_algorithm, number_of_iterations, display_flag)
model.fit(autoscaled_variables)

if model.success_flag:
    # calculate responsibilities
    responsibilities = model.responsibility(autoscaled_variables)
    means = responsibilities.dot(model.map_grids)
    modes = model.map_grids[responsibilities.argmax(axis=1), :]

    mean_of_estimated_mean_of_y, mode_of_estimated_mean_of_y, responsibilities_y, py = \
        model.gtmr_predict(autoscaled_variables.iloc[:, numbers_of_x], numbers_of_x, numbers_of_y)

    plt.rcParams['font.size'] = 18
    for index, y_number in enumerate(numbers_of_y):
        # rescale the predicted y back to the original scale
        predicted_y_test = mode_of_estimated_mean_of_y[:, index] * variables.iloc[:, y_number].std() \
            + variables.iloc[:, y_number].mean()
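# --------------------------------------------------------------------------
# Note: a minimal sketch (not part of the original script) of how the
# rescaled predictions could be checked against measured values afterwards.
# `measured_y` and `predicted_y` are illustrative names for 1-D arrays on
# the original (non-autoscaled) y scale; matplotlib and scikit-learn are
# assumed to be available.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score, mean_absolute_error

def plot_measured_vs_predicted(measured_y, predicted_y, label='y'):
    # scatter plot of measured vs. predicted values with a diagonal reference line
    plt.figure(figsize=(6, 6))
    plt.scatter(measured_y, predicted_y, c='blue')
    y_min = min(np.min(measured_y), np.min(predicted_y))
    y_max = max(np.max(measured_y), np.max(predicted_y))
    plt.plot([y_min, y_max], [y_min, y_max], 'k-')
    plt.xlabel('measured ' + label)
    plt.ylabel('predicted ' + label)
    plt.show()
    # simple accuracy summary on the original scale
    print('r2  :', r2_score(measured_y, predicted_y))
    print('MAE :', mean_absolute_error(measured_y, predicted_y))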
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#=========================================================================*/

# load gtm module
from gtm import GTM

db = GTM()
print db.about()
print db.version()

getValue = "Initially empty"
for k in xrange(1, 1000):
    db.set("^FibonacciA", "1")
    db.set("^FibonacciB", "1")
    termnumber = 100
    for i in xrange(1, termnumber):
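# --------------------------------------------------------------------------
# Note: a plain-Python sketch (not part of the original example) of the
# Fibonacci recurrence that the truncated loop above appears to compute
# with the ^FibonacciA / ^FibonacciB globals; `termnumber` mirrors the
# value used in the example.
def fibonacci_terms(termnumber=100):
    # iteratively build the first `termnumber` Fibonacci terms,
    # starting from the two seed values 1, 1 as in the example above
    a, b = 1, 1
    terms = [a, b]
    for _ in range(2, termnumber):
        a, b = b, a + b
        terms.append(b)
    return terms

print(fibonacci_terms(10))  # [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]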
plt.xlabel('y1')
plt.ylabel('y2')
plt.show()

variables = np.c_[x, y1, y2]
variables_train, variables_test = train_test_split(
    variables, test_size=number_of_test_samples, random_state=100)

# standardize x and y (the test set is scaled with the training-set statistics)
autoscaled_variables_train = (variables_train - variables_train.mean(axis=0)) \
    / variables_train.std(axis=0, ddof=1)
autoscaled_variables_test = (variables_test - variables_train.mean(axis=0)) \
    / variables_train.std(axis=0, ddof=1)

# optimize hyperparameters in GTMR with CV
model = GTM()
model.gtmr_cv_opt(autoscaled_variables_train, numbers_of_y, candidates_of_shape_of_map,
                  candidates_of_shape_of_rbf_centers, candidates_of_variance_of_rbfs,
                  candidates_of_lambda_in_em_algorithm, fold_number, number_of_iterations)
model.display_flag = display_flag
print('optimized shape of map :', model.shape_of_map)
print('optimized shape of RBF centers :', model.shape_of_rbf_centers)
print('optimized variance of RBFs :', model.variance_of_rbfs)
print('optimized lambda in EM algorithm :', model.lambda_in_em_algorithm)

# construct GTMR model
model.fit(autoscaled_variables_train)
if model.success_flag: