Code example #1
def neural_network(x_train, x_test, y_train, y_test, x_pca, x_ica, x_kpca,
                   x_rp, x_kmeans, x_gmm, **kwargs):
    """Perform neural network experiment.

        Runs the same neural network configuration on the original dataset,
        on each dimensionality-reduced dataset (PCA, ICA, KPCA, RP), and on
        the original dataset augmented with cluster assignments (k-Means,
        GMM) as an extra feature.

        Args:
           x_train (ndarray): training data.
           x_test (ndarray): test data.
           y_train (ndarray): training labels.
           y_test (ndarray): test labels.
           x_pca (ndarray): reduced dataset by PCA.
           x_ica (ndarray): reduced dataset by ICA.
           x_kpca (ndarray): reduced dataset by KPCA.
           x_rp (ndarray): reduced dataset by RP.
           x_kmeans (ndarray): clusters produced by k-Means.
           x_gmm (ndarray): clusters produced by Gaussian Mixture Models.
           kwargs (dict): additional arguments to pass:
                    - layer1_nodes (int): number of neurons in first layer.
                    - layer2_nodes (int): number of neurons in second layer.
                    - learning_rate (float): learning rate.

        Returns:
           None.
        """

    def _banner(title):
        # Section header printed before each experiment.
        print('\n--------------------------')
        print(title)
        print('--------------------------')

    def _build_nn():
        # A fresh, identically-configured network for every experiment so
        # that no learned weights carry over between runs.
        return NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],
                             layer2_nodes=kwargs['layer2_nodes'],
                             learning_rate=kwargs['learning_rate'])

    def _with_cluster_feature(x, clusters):
        # z-score the 1D cluster assignments and append them to x as one
        # extra column.
        # NOTE(review): each split is normalized with its OWN mean/std, so
        # the test column uses test-set statistics; reusing the training
        # statistics would avoid leakage — preserved as-is to keep the
        # original behavior.
        normalized = (clusters - np.mean(clusters)) / np.std(clusters)
        return np.append(x, np.expand_dims(normalized, axis=1), axis=1)

    # Baseline: original dataset.
    _banner('NN')
    _build_nn().experiment(x_train, x_test, y_train, y_test)

    # Dimensionality-reduced datasets; each entry is (train, test).
    for title, reduced in (('PCA + NN', x_pca),
                           ('ICA + NN', x_ica),
                           ('KPCA + NN', x_kpca),
                           ('RP+ NN', x_rp)):
        _banner(title)
        _build_nn().experiment(reduced[0], reduced[1], y_train, y_test)

    # Original dataset augmented with cluster assignments as a feature;
    # each entry is (train clusters, test clusters).
    for title, clusters in (('KMEANS+ NN', x_kmeans),
                            ('GMM+ NN', x_gmm)):
        _banner(title)
        _build_nn().experiment(_with_cluster_feature(x_train, clusters[0]),
                               _with_cluster_feature(x_test, clusters[1]),
                               y_train, y_test)
Code example #2
def experiment(x_train, x_test, y_train, y_test):
    """Perform experiment.

        Runs the full benchmark suite — KNN, SVM, Decision Trees, AdaBoost
        and Neural Networks — on the given train/test split.

        Args:
           x_train (ndarray): training data.
           x_test (ndarray): test data.
           y_train (ndarray): training labels.
           y_test (ndarray): test labels.

        Returns:
           None.
        """

    separator = '\n--------------------------'

    # Learning curves are evaluated at these training-set sizes, stepping
    # by 10 up to 90% of the available training data.
    sizes = np.arange(20, int(len(x_train) * 0.9), 10)

    # K-Nearest Neighbor
    print(separator)
    KNN(k=1, weights='uniform', p=2).experiment(
        x_train, x_test, y_train, y_test,
        cv=10,
        y_lim=0.3,
        n_neighbors_range=np.arange(1, 50, 2),
        p_range=np.arange(1, 20),
        weight_functions=['uniform', 'distance'],
        train_sizes=sizes)

    # Support Vector Machines
    print(separator)
    c_candidates = [1, 5] + list(range(10, 100, 20)) + list(range(100, 1000, 50))
    SVM(c=1., kernel='rbf', degree=3, gamma=0.001, random_state=42).experiment(
        x_train, x_test, y_train, y_test,
        cv=10,
        y_lim=0.2,
        C_range=c_candidates,
        kernels=['linear', 'poly', 'rbf'],
        gamma_range=np.logspace(-7, 0, 50),
        poly_degrees=[2, 3, 4],
        train_sizes=sizes)

    # Decision Trees
    print(separator)
    DecisionTree(max_depth=1, min_samples_leaf=1, random_state=42).experiment(
        x_train, x_test, y_train, y_test,
        cv=10,
        y_lim=0.1,
        max_depth_range=list(range(1, 50)),
        min_samples_leaf_range=list(range(1, 30)),
        train_sizes=sizes)

    # AdaBoost
    print(separator)
    estimator_candidates = ([1, 3, 5, 8] + list(range(10, 100, 5))
                            + list(range(100, 1000, 50)))
    AdaBoost(n_estimators=50,
             learning_rate=1.,
             max_depth=3,
             random_state=42).experiment(
        x_train, x_test, y_train, y_test,
        cv=10,
        y_lim=0.2,
        max_depth_range=list(range(1, 30)),
        n_estimators_range=estimator_candidates,
        learning_rate_range=np.logspace(-6, 1, 50),
        train_sizes=sizes)

    # Neural Networks
    print(separator)
    NeuralNetwork(alpha=0.01,
                  layer1_nodes=50,
                  layer2_nodes=30,
                  learning_rate=0.001,
                  max_iter=100).experiment(
        x_train, x_test, y_train, y_test,
        cv=10,
        y_lim=0.1,
        alpha_range=np.logspace(-5, 1, 30),
        learning_rate_range=np.logspace(-4, 0, 50),
        train_sizes=sizes)