# Chunk collapsed onto one physical line and truncated mid-`elif` (the
# 'neural_net' branch has no body here) — left byte-identical below.
# What it does: pulls the test split out of `test_data`, normalizes each
# feature column with training-set statistics (train_X_features_mean /
# train_X_features_range, computed elsewhere), then constructs a classifier
# chosen by `model_name` from JSON-decoded `params_json`.
# NOTE(review): `xrange` means this is Python 2 source — confirm before porting.
# NOTE(review): the inline comment claims values end up "between -1 to 1";
#   (x - mean) / range only guarantees that when `range` spans the full data
#   extent around the mean — verify against how the range is computed.
test_X = test_data['inputs'] test_y = test_data['targets'] #normalize the value of features to be between -1 to 1 for j in xrange(test_X.shape[1]): test_X[:,j] = (test_X[:,j] - train_X_features_mean[0,j])/train_X_features_range[0,j] # Run classifier model = None params = json.loads(params_json) # Standard algorithms if not ensemble_method: # KNN if model_name == 'knn': model = KNNModel(params) # KNNModel({'k': 10}) # Logistic elif model_name == 'logistic': model = LogisticRegressionModel(params) # LogisticRegressionModel({'penalty': 'l2', 'regularization_term': 0.1}) # SVM elif model_name == 'svm': model = SVMModel(params) # SVMModel({'kernel': 'rbf', 'probability_flag': False}) # MoG elif model_name == 'mog': model = MOGModel(params) # MOGModel({'n_components': 20}) # Multi-layer perceptron (NNets) elif model_name == 'neural_net':
"""Train an SVM and a KNN classifier on the walls dataset and persist both."""
from models import SVMModel, KNNModel
from util import read_data, number_of_inputs

# Load the training set; the first `number_of_inputs` columns are features,
# the last column is the target label.
samples = read_data('training_data/walls.csv')
features = samples[:, :number_of_inputs]
targets = samples[:, -1]

# Fit each classifier on the same data and serialize it to its own file
# (SVM first, then KNN — same order as before).
for model_cls, out_path in (
    (SVMModel, 'model/walls_svm.pkl'),
    (KNNModel, 'model/walls_knn.pkl'),
):
    classifier = model_cls()
    classifier.fit(features, targets)
    classifier.save(out_path)
# Chunk collapsed onto one physical line and truncated mid-`elif` (the
# 'neural_net' branch has no body here) — left byte-identical below.
# What it does: pulls the validation split out of `valid_data`, normalizes
# each feature column with training-set statistics (train_X_features_mean /
# train_X_features_range, computed elsewhere), then constructs a classifier
# chosen by `model_name` from JSON-decoded `params_json`.
# NOTE(review): `xrange` means this is Python 2 source — confirm before porting.
# NOTE(review): the inline comment claims values end up "between -1 to 1";
#   (x - mean) / range only guarantees that when `range` spans the full data
#   extent around the mean — verify against how the range is computed.
valid_X = valid_data['inputs'] valid_y = valid_data['targets'] #normalize the value of features to be between -1 to 1 for j in xrange(valid_X.shape[1]): valid_X[:,j] = (valid_X[:,j] - train_X_features_mean[0,j])/train_X_features_range[0,j] # Run classifier model = None params = json.loads(params_json) # Standard algorithms if not ensemble_method: # KNN if model_name == 'knn': model = KNNModel(params) # KNNModel({'k': 10}) # Logistic elif model_name == 'logistic': model = LogisticRegressionModel(params) # LogisticRegressionModel({'penalty': 'l2', 'regularization_term': 0.1}) # SVM elif model_name == 'svm': model = SVMModel(params) # SVMModel({'kernel': 'rbf', 'probability_flag': False}) # MoG elif model_name == 'mog': model = MOGModel(params) # MOGModel({'n_components': 20}) # Multi-layer perceptron (NNets) elif model_name == 'neural_net':