Example #1
    def train_support_vector_machine(self,
                                     X_train,
                                     X_test,
                                     y_train,
                                     y_test,
                                     save_mdl=False,
                                     save_loc=Weights_File,
                                     max_epochs=17000,
                                     cost_thresh=0.001,
                                     l_r=0.000001,
                                     r_s=10000):
        # set the module-level learning rate and regularization strength
        global Learning_rate
        global Reg_strength
        Learning_rate = l_r
        Reg_strength = r_s

        # normalize the data
        x_train_cpy = Utils.normalize_numpy_array(X_train,
                                                  self.__min_max_scalar)
        x_test_cpy = Utils.normalize_numpy_array(X_test, self.__min_max_scalar)

        # append a column of 1s to the train and test data to act as the intercept (bias) term
        intercept_train = np.ones(x_train_cpy.shape[0])
        intercept_test = np.ones(x_test_cpy.shape[0])

        x_train_cpy = np.hstack(
            (x_train_cpy, np.atleast_2d(intercept_train).T))
        x_test_cpy = np.hstack((x_test_cpy, np.atleast_2d(intercept_test).T))

        # setting attributes
        self.__x_train = x_train_cpy
        self.__x_test = x_test_cpy
        self.__y_train = y_train
        self.__y_test = y_test

        # use stochastic gradient descent to find the optimal weights of the linear classifier
        print("Training Support Vector Machine")
        self.__trained_weights = stochastic_gradient_descent(
            self.__x_train,
            self.__y_train,
            max_epochs=max_epochs,
            cost_thresh=cost_thresh)

        print("Trained Weights: ", self.__trained_weights)

        print("Finished Training SupportVectorMachine")

        self.__test_support_vector_machine()

        if save_mdl:
            self.__save_trained_weights(save_loc)
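
The stochastic_gradient_descent helper called above is not part of this example. Below is a minimal, self-contained sketch of what such a routine typically looks like for a soft-margin linear SVM: an L2 penalty plus hinge loss, minimized one sample at a time, with labels assumed to be encoded as +1/-1. The parameter names learning_rate and reg_strength stand in for the module-level Learning_rate and Reg_strength globals set above; the project's actual helper may differ.

import numpy as np

# NOTE: illustrative sketch only; the project's actual helper may differ.
def compute_cost(W, X, Y, reg_strength):
    # soft-margin SVM objective: L2 penalty plus average hinge loss
    n = X.shape[0]
    distances = 1 - Y * (X @ W)
    distances[distances < 0] = 0  # max(0, distance)
    hinge_loss = reg_strength * (np.sum(distances) / n)
    return 0.5 * np.dot(W, W) + hinge_loss

def calculate_cost_gradient(W, x_i, y_i, reg_strength):
    # sub-gradient of the objective for a single sample
    distance = 1 - y_i * np.dot(x_i, W)
    if max(0, distance) == 0:
        return W
    return W - reg_strength * y_i * x_i

def stochastic_gradient_descent(features, outputs, max_epochs=17000,
                                cost_thresh=0.001, learning_rate=1e-6,
                                reg_strength=10000):
    weights = np.zeros(features.shape[1])
    prev_cost = float("inf")
    for epoch in range(1, max_epochs + 1):
        # visit the samples in a fresh random order each epoch
        for i in np.random.permutation(features.shape[0]):
            grad = calculate_cost_gradient(weights, features[i], outputs[i],
                                           reg_strength)
            weights = weights - learning_rate * grad
        # stop early once the relative cost change falls below the threshold
        if epoch % 100 == 0 or epoch == max_epochs:
            cost = compute_cost(weights, features, outputs, reg_strength)
            if abs(prev_cost - cost) < cost_thresh * prev_cost:
                return weights
            prev_cost = cost
    return weights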
Example #2
    def predict_sample(self, sample_data, return_lbl=False):
        try:
            if self.__trained_weights is not None:
                # work on a deep copy so the caller's sample is left untouched
                sample_data_copy = copy.deepcopy(sample_data)

                sample_data_copy = Utils.normalize_numpy_array(
                    sample_data_copy, self.__min_max_scalar)

                # append the intercept column of 1s, matching the training data layout
                intercept = np.ones(sample_data_copy.shape[0])
                sample_data_copy = np.hstack(
                    (sample_data_copy, np.atleast_2d(intercept).T))

                y_predict = np.sign(
                    np.dot(sample_data_copy, self.__trained_weights))

                if return_lbl:
                    if y_predict > 0:
                        return 'ASD'
                    else:
                        return 'Normal'

                return y_predict
            else:
                print("Train the SVM first or load previously trained weights.")

        except Exception as e:
            print("Error occurred predicting the sample in SVM.")
            print('Exception', e)
            traceback.print_exc()
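
Both examples depend on Utils.normalize_numpy_array and the self.__min_max_scalar attribute (presumably produced by Utils.calculate_min_max_scalar on the training data), neither of which is shown here. The sketch below assumes standard per-column min-max scaling fitted on the training set; the real Utils module may behave differently.

import numpy as np

def calculate_min_max_scalar(data):
    # per-column minimum and maximum of the training data,
    # stored so that new samples can be scaled the same way
    data = np.asarray(data, dtype=float)
    return data.min(axis=0), data.max(axis=0)

def normalize_numpy_array(data, min_max_scalar):
    # scale each column to [0, 1] using the training-set min/max
    col_min, col_max = min_max_scalar
    data = np.asarray(data, dtype=float)
    denom = np.where(col_max - col_min == 0, 1, col_max - col_min)  # avoid divide-by-zero
    return (data - col_min) / denom

# example
train = np.array([[1.0, 10.0], [3.0, 30.0], [2.0, 20.0]])
scalar = calculate_min_max_scalar(train)
print(normalize_numpy_array(np.array([[2.0, 25.0]]), scalar))  # [[0.5  0.75]]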
    def train_MultiLayerPerceptron(self,
                                   X_train,
                                   X_test,
                                   y_train,
                                   y_test,
                                   save_mdl=False,
                                   save_loc=MultilayerPerceptronMdlFl,
                                   hidden_layer_sizes=16,
                                   max_iter=400):

        x_train_cpy = Utils.normalize_numpy_array(X_train,
                                                  self.__min_max_scalar)
        x_test_cpy = Utils.normalize_numpy_array(X_test, self.__min_max_scalar)

        self.__x_train = x_train_cpy
        self.__x_test = x_test_cpy
        self.__y_train = y_train
        self.__y_test = y_test

        self.__max_iter = max_iter
        self.__hidden_layer_size = hidden_layer_sizes

        print("Training MultiLayerPerceptron...")
        t0 = time.time()  # start of training timer

        # one hidden layer of ReLU units (hidden_layer_sizes, 16 by default), trained with the Adam solver
        self.__mlp_classifier = MLPClassifier(
            hidden_layer_sizes=self.__hidden_layer_size,
            activation='relu',
            max_iter=self.__max_iter,
            solver='adam')

        self.__mlp_classifier = self.__mlp_classifier.fit(
            self.__x_train, self.__y_train)

        t1 = time.time()  # end of training timer
        print("Finished Training MultiLayerPerceptron in: ", t1 - t0, "seconds")

        self.__test_MultiLayerPerceptron()

        if save_mdl:
            self.__save_MultiLayerPerceptron(save_loc)
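
Stripped of the class wrapper, the training call above reduces to a plain scikit-learn MLPClassifier fit. The snippet below reproduces that configuration on synthetic data purely for illustration; the feature matrix, labels, and their encoding are made up, not the project's dataset.

import numpy as np
from sklearn.neural_network import MLPClassifier

rng = np.random.default_rng(0)
X_train = rng.random((100, 10))    # stand-in for the normalized feature matrix
y_train = rng.integers(0, 2, 100)  # stand-in binary labels (the real data encodes ASD vs. Normal)

# one hidden layer of 16 ReLU units trained with the Adam solver, as configured above
clf = MLPClassifier(hidden_layer_sizes=(16,), activation='relu',
                    solver='adam', max_iter=400)
clf.fit(X_train, y_train)
print(clf.predict(X_train[:1]))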
    def predict(self, sample_data, return_lbl=False):
        # min-max scale the sample with the same scalar used during training
        sample_data = Utils.normalize_numpy_array(sample_data,
                                                  self.__min_max_scalar)

        # MLPClassifier expects a 2-D array, so wrap the single sample in a list
        prediction = self.__mlp_classifier.predict([sample_data])

        if return_lbl:
            if prediction[0] > 0:
                return 'ASD'
            else:
                return 'Normal'
        else:
            return prediction
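
The save_mdl branches above call __save_trained_weights and __save_MultiLayerPerceptron, which are not shown, and the Weights_File / MultilayerPerceptronMdlFl constants are defined elsewhere in the module. The sketch below shows the kind of persistence such methods usually perform; the function names and file names are placeholders, not the project's actual code or constants.

import pickle
import numpy as np

def save_trained_weights(weights, save_loc="svm_weights.npy"):
    # persist the SVM weight vector so predict_sample can reuse it later (placeholder path)
    np.save(save_loc, weights)

def load_trained_weights(save_loc="svm_weights.npy"):
    return np.load(save_loc)

def save_mlp_classifier(classifier, save_loc="mlp_model.pkl"):
    # pickle the fitted scikit-learn estimator (placeholder path)
    with open(save_loc, "wb") as f:
        pickle.dump(classifier, f)

def load_mlp_classifier(save_loc="mlp_model.pkl"):
    with open(save_loc, "rb") as f:
        return pickle.load(f)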