Example #1
def show_model(mean, stddev, weight):
    data = np.array([])
    output = np.array([])
    for i in range(-10, 10, 1):
        for j in range(-10, 10, 1):
            for k in range(-10, 10, 1):
                data = np.append(data, np.array([i, j, k]))
    data = data.reshape(1, -1, 3) / 10
    fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
              fnn_output_size, mean, stddev, weight, fnn_lr, 1)

    for element in data:
        output = np.append(output, fnn.forward(element))
    ModelScatter.output_scatter_3d(data, output, fnn_threshold1)
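# Usage sketch (assumption): visualize an untrained model by drawing random
# parameters with the same ranges the training functions below use.
rand_mean = np.array([np.random.uniform(-1, 1) for _ in range(fnn_membership_size)])
rand_stddev = np.array([np.random.uniform(0, 1) for _ in range(fnn_membership_size)])
rand_weight = np.array([np.random.uniform(-1, 1) for _ in range(fnn_rule_size)])
show_model(rand_mean, rand_stddev, rand_weight)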
Example #2
def test_model(fnn_model):
    org_data, org_label = LoadData.get_method2_test()
    X_train, X_test, y_train, y_test = train_test_split(org_data,
                                                        org_label,
                                                        test_size=0.3)

    # Convert y_test (28 categories to 6 categories)
    y_test = np.array([int(e[1:2]) for e in y_test])

    print('<---Test Model Start--->')
    output_list = np.array([])
    for model in fnn_model:
        fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
                  fnn_output_size, model.mean, model.stddev, model.weight,
                  fnn_lr, 1)
        output = fnn.testing_model(X_test)
        output_list = np.append(output_list, output)

    # y_label = label_convert(y_test, build_hash_table())
    output_list = output_list.reshape(len(fnn_model), -1).T  # one row per sample, one column per model

    # Try without normalization
    output_list = Normalize.normalization(output_list)

    label_pred, count = label_encoding(output_list, build_hash_table())
    # cnt = 0
    # for x, y in zip(output_list[0:10], y_test[0:10]):
    #     print(x, ' ', y, ' ', label_pred[cnt])
    #     cnt += 1

    for x, y in zip(y_test, label_pred):
        print('correct', x, '<->', 'predict', y)

    cnf_matrix = confusion_matrix(y_test, label_pred)
    # Plot the confusion matrix
    # plt.figure()
    # ConfusionMatrix.plot_confusion_matrix(cnf_matrix, classes=list(set(y_test)),
    #                       title='Confusion matrix(Final FNN Model)')

    cnf_accuracy = np.sum(cnf_matrix.diagonal()) / np.sum(cnf_matrix)

    print('FinalModel_Accuracy: ', accuracy_score(y_test, label_pred))

    print('This is the confusion matrix(test_all_model)\n', cnf_matrix)
    # print(C_matrix)
    # print(C_accuracy)

    print('<---Test Model Successfully--->')
    print('<----------------------------------------------->')
    return cnf_accuracy, count
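# Minimal sketch of the reshape above: testing_model scores are appended
# model-major, so reshape(len(fnn_model), -1).T yields one row per test sample
# and one column per model (the values here are illustrative only).
demo_scores = np.append(np.array([0.9, 0.8, 0.7]),   # model 0 on three samples
                        np.array([0.1, 0.2, 0.3]))   # model 1 on the same samples
demo_per_sample = demo_scores.reshape(2, -1).T        # shape (3, 2); row i = sample i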
Example #3
def get_fnn_output(data, fnn_attribute):
    forward_output_list = np.array([])
    for i in range(0, 6, 1):
        # print('<--- Print the FNN ' + str(nn) + ' Output--->')
        mean = fnn_attribute['Mean'][i]
        stddev = fnn_attribute['Stddev'][i]
        weight = fnn_attribute['Weight'][i]
        #
        # print('Mean'+str(i), mean)
        # print('Stddev'+str(i), stddev)
        # print('Weight'+str(i), weight)
        fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
                  fnn_output_size, mean, stddev, weight, fnn_lr, 1)
        forward_output = fnn.forward(data)
        forward_output_list = np.append(forward_output_list, forward_output)
    return forward_output_list
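# Usage sketch (assumption): `fnn_attribute` is the weight dict loaded via
# LoadData.load_fnn_weight (keys 'Mean', 'Stddev', 'Weight', six entries each),
# and the input is one feature vector of length fnn_input_size; argmax as the
# class pick is only an illustration, the real pipeline decodes labels via a hash table.
sample = np.random.uniform(-1, 1, fnn_input_size)
scores = get_fnn_output(sample, fnn_attribute)
predicted_index = int(np.argmax(scores))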
Example #4
def train_fnn(nn):
    accuracy = 0.0
    matrix = np.array([])
    fnn_copy = FNN()
    all_nn_accuracy = np.array([])

    org_data, org_label = LoadData.get_method2_fnn_train(nn)
    org_label = np.array([1 if label == nn else 0 for label in org_label])
    X_train, X_test, y_train, y_test = train_test_split(org_data,
                                                        org_label,
                                                        test_size=0.3)
    # print(X_train, X_train.shape)
    # print(y_train, y_train.shape)

    print('<---Train the FNN ' + nn + ' Start--->')
    for i in range(fnn_random_size):
        # Randomly generate the mean, standard deviation, and weight
        mean = np.array(
            [np.random.uniform(-1, 1) for _ in range(fnn_membership_size)])
        stddev = np.array(
            [np.random.uniform(0, 1) for _ in range(fnn_membership_size)])
        weight = np.array(
            [np.random.uniform(-1, 1) for _ in range(fnn_rule_size)])

        fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
                  fnn_output_size, mean, stddev, weight, fnn_lr, 1)
        fnn.training_model(fnn_epoch, X_train, y_train)

        test_output = fnn.testing_model(X_test)
        label_pred = [
            1 if values >= fnn_threshold else 0 for values in test_output
        ]
        C_matrix = confusion_matrix(y_test, label_pred)
        C_accuracy = np.sum(C_matrix.diagonal()) / np.sum(C_matrix)
        all_nn_accuracy = np.append(all_nn_accuracy, C_accuracy)
        # print(C_matrix)
        # print(C_accuracy)
        if C_accuracy > accuracy:
            fnn_copy = copy.deepcopy(fnn)
            accuracy = copy.deepcopy(C_accuracy)
            matrix = copy.deepcopy(C_matrix)
            print('swap')
    print('<---Train the FNN ' + nn + ' Successfully--->')
    print('<----------------------------------------------->')

    # rel_path = 'Experiment/Graph/method2/Best_FNN_' + nn + '_error_trend.png'
    # abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    # ErrorPlot.error_trend(
    #     'Best_FNN_' + str(nn) + '_error_trend', len(fnn_copy.error_list), fnn_copy.error_list, abs_path)
    #
    # rel_path = 'Experiment/Graph/method2/Accuracy vs FNN' + str(nn) + '.png'
    # abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    # AccuracyPlot.build_accuracy_plot(
    #     'Accuracy vs FNN'+str(nn), np.array([i for i in range(1, len(all_nn_accuracy) + 1, 1)]),
    #     all_nn_accuracy, abs_path)

    return fnn_copy, accuracy, matrix
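# Usage sketch (assumption): train one FNN per category and keep the best model
# of each; `nn_category` is assumed to list the class names fed to
# LoadData.get_method2_fnn_train. The resulting list can then be passed to
# test_model() in example #2.
trained_models = []
for category in nn_category:
    best_model, best_acc, best_matrix = train_fnn(category)
    print(category, 'best accuracy:', best_acc)
    trained_models.append(best_model)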
Example #5
def train_local_fnn(algorithm, X_train, X_test, y_train, y_test):
    accuracy = 0.0
    matrix = np.array([])
    fnn_copy = FNN()
    # This variable stores all the accuracy values
    all_nn_accuracy = np.array([])
    for i in range(fnn_random_size):
        # Randomly generate the mean, standard deviation, and weight
        mean = np.array(
            [np.random.uniform(-1, 1) for _ in range(fnn_membership_size)])
        stddev = np.array(
            [np.random.uniform(0, 1) for _ in range(fnn_membership_size)])
        weight = np.array(
            [np.random.uniform(-1, 1) for _ in range(fnn_rule_size)])

        fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
                  fnn_output_size, mean, stddev, weight, fnn_lr, 1)
        fnn.training_model(fnn_epoch, X_train, y_train)

        # Test the FNN model, save the one that has the best accuracy
        test_output = fnn.testing_model(X_test)
        label_pred = np.array(
            [1 if value > fnn_threshold else 0 for value in test_output])

        C_matrix = confusion_matrix(y_test, label_pred)
        C_accuracy = np.sum(C_matrix.diagonal()) / np.sum(C_matrix)
        all_nn_accuracy = np.append(all_nn_accuracy, C_accuracy)
        if C_accuracy > accuracy:
            accuracy = copy.deepcopy(C_accuracy)
            fnn_copy = copy.deepcopy(fnn)
            matrix = copy.deepcopy(C_matrix)

    # Choose the best FNN to Plot error trend
    # rel_path = './Experiment/Method3/Graph/Best_FNN_'+str(nn)+'_error_trend.png'
    # abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    # ErrorPlot.error_trend('Best_FNN_'+str(nn)+'_error_trend',
    #                       len(fnn_copy.error_list), fnn_copy.error_list, abs_path)

    # Choose the best Accuracy to Plot
    # rel_path = './Experiment/Method3/Graph/Accuracy vs FNN'+str(nn)+'.png'
    # abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    # AccuracyPlot.build_accuracy_plot(
    #    'Accuracy vs FNN' +
    #    str(nn), np.array([i for i in range(1, len(all_nn_accuracy) + 1, 1)]),
    #    all_nn_accuracy, abs_path)

    return fnn_copy, accuracy, matrix
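# Usage sketch (assumption): exercise the trainer on synthetic one-vs-rest data just
# to show the expected shapes; real calls use the reduced, normalized features as in
# example #7, and the algorithm name ('demo' here) is only passed through.
demo_data = np.random.uniform(-1, 1, (100, fnn_input_size))
demo_label = np.random.randint(0, 2, 100)
Xtr, Xte, ytr, yte = train_test_split(demo_data, demo_label, test_size=0.3)
best_fnn, best_acc, best_matrix = train_local_fnn('demo', Xtr, Xte, ytr, yte)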
Example #6
def train_keras_lnn(nn_array, org_data, org_label, algorithm):
    """Get the fnn output and input the lnn"""
    fnn_output = np.array([])
    for name in nn_array:
        print('<---nn -> ', name, '--->')
        rel_path = './Experiment/Method3/FNNModel/' + name + '.json'
        abs_path = os.path.join(os.path.dirname(__file__), rel_path)
        attribute = LoadData.load_fnn_weight(abs_path)
        mean = np.asarray(attribute['Mean'])
        stddev = np.asarray(attribute['Stddev'])
        weight = np.asarray(attribute['Weight'])
        # Test the FNN
        fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
                  fnn_output_size, mean, stddev, weight, fnn_lr, 1)
        result = fnn.testing_model(org_data)
        fnn_output = np.append(fnn_output, result)

    fnn_output = fnn_output.reshape(len(nn_array), -1).T

    # fnn_label = np.array([int(e[1:2])-1 for e in org_label])
    print('org_label', org_label)
    fnn_label = label_convert(org_label)
    X_train, X_test, y_train, y_test = train_test_split(fnn_output,
                                                        fnn_label,
                                                        test_size=0.3,
                                                        random_state=42)
    print('X_train.shape', X_train.shape)
    print('y_train.shape', y_train.shape)

    # Construct the lnn
    y_trainOneHot = np_utils.to_categorical(y_train)
    y_testOneHot = np_utils.to_categorical(y_test)

    model = Sequential()
    model.add(Dense(units=32, input_dim=12))
    model.add(Dense(32, activation='tanh'))
    model.add(
        Dense(units=12, kernel_initializer='normal', activation='softmax'))
    adam = optimizers.Adam(lr=0.001)
    model.compile(loss='mean_squared_error', optimizer=adam, metrics=['mse'])
    model.summary()

    train_history = model.fit(x=X_train,
                              y=y_trainOneHot,
                              validation_split=0.2,
                              epochs=30,
                              batch_size=200,
                              verbose=2)
    show_train_history(train_history, 'mean_squared_error',
                       'val_mean_squared_error', 'mean_squared_error.png')
    show_train_history(train_history, 'loss', 'val_loss', 'loss.png')

    scores = model.evaluate(X_test, y_testOneHot)
    print('scores', scores)

    prediction = model.predict(X_test)
    for x, y in zip(prediction[:10], y_testOneHot[:10]):
        print(x, ' ', y)

    prediction = model.predict_classes(X_test)
    y_pred = prediction_convert(prediction)
    yy = onehot_convert(y_testOneHot)

    print(set(y_pred))
    print(set(yy))

    cnf_matrix = confusion_matrix(yy, y_pred)
    print('accuracy_score', accuracy_score(yy, y_pred))
    print('cnf_matrix\n', cnf_matrix)
    rel_path = './Experiment/method3/Graph/cnf_lnn.png'
    abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    plt.figure(figsize=(8, 6), dpi=200)
    ConfusionMatrix.plot_confusion_matrix(cnf_matrix,
                                          abs_path,
                                          classes=list(set(y_pred)),
                                          title='Final Model Confusion matrix')
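# Minimal sketch of the show_train_history helper called above, assuming it plots
# two keys of the Keras History object and saves the figure, with matplotlib.pyplot
# imported as plt (as in the confusion-matrix plotting); the project's real
# implementation may differ.
def show_train_history(train_history, train_key, val_key, file_name):
    plt.figure()
    plt.plot(train_history.history[train_key], label=train_key)
    plt.plot(train_history.history[val_key], label=val_key)
    plt.xlabel('epoch')
    plt.legend()
    plt.savefig(file_name)
    plt.close()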
Example #7
def train_local_fnn(nn, algorithm):

    # Declare variables
    nn_mean, nn_stddev, nn_weight = (0.0 for _ in range(3))
    accuracy = 0.0
    matrix = np.array([])
    record_fnn = FNN()
    loss_list = np.array([])

    # This variable stores all the accuracy values
    all_nn_accuracy = np.array([])

    # Load the file 'FNN_Train_data_' + str(nn) + '.xlsx'
    org_data, org_label = LoadData.get_method1_fnn_train(nn)
    org_label = np.array([1 if element == nn else 0 for element in org_label])

    # Reduce dimension and generate train/test data
    reduced_data = reduce_dimension(org_data, org_label, algorithm)
    # normalized_data = preprocessing.normalize(reduced_data)
    # reduced_data = normalization(reduced_data)

    # Normalization option 1
    # min_max_scaler = preprocessing.MinMaxScaler()
    # normalized_data = min_max_scaler.fit_transform(reduced_data)

    # Normalization option 2
    # normalized_data = preprocessing.scale(reduced_data)

    # Normalization option 3
    normalized_data = Normalize.normalization(reduced_data)

    X_train, X_test, y_train, y_test = train_test_split(normalized_data,
                                                        org_label,
                                                        test_size=0.3)
    # print(X_train, X_train.shape)
    # print(y_train, y_train.shape)

    # Train the FNN
    print('<---Train the FNN' + str(nn) + ' Start--->')
    for i in range(fnn_random_size):

        # Randomly generate the mean, standard deviation, and weight
        mean = np.array(
            [np.random.uniform(-1, 1) for _ in range(fnn_membership_size)])
        stddev = np.array(
            [np.random.uniform(0, 1) for _ in range(fnn_membership_size)])
        weight = np.array(
            [np.random.uniform(-1, 1) for _ in range(fnn_rule_size)])
        """
        # Generate FNN object to train
        # para1 -> fnn input layer size
        # para2 -> fnn membership layer size
        # para3 -> fnn rule layer size
        # para4 -> fnn output layer size
        # para5 -> random mean values
        # para6 -> random stddev values
        # para7 -> random weight values
        # para8 -> nn label type
        """
        fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
                  fnn_output_size, mean, stddev, weight, fnn_lr, 1)
        fnn.training_model(fnn_epoch, X_train, y_train)

        # Test the FNN model, save the one that has the best accuracy
        test_output = fnn.testing_model(X_test)

        label_pred = label_encode(nn, test_output)
        # print(y_test.shape)
        # print(label_pred.shape)
        # print(y_test)
        # print(label_pred)

        C_matrix = confusion_matrix(y_test, label_pred)
        C_accuracy = np.sum(C_matrix.diagonal()) / np.sum(C_matrix)
        all_nn_accuracy = np.append(all_nn_accuracy, C_accuracy)

        # print(C_matrix)
        # print(C_accuracy)
        if C_accuracy > accuracy:
            accuracy = copy.deepcopy(C_accuracy)
            nn_mean = copy.deepcopy(fnn.mean)
            nn_stddev = copy.deepcopy(fnn.stddev)
            nn_weight = copy.deepcopy(fnn.weight)
            matrix = copy.deepcopy(C_matrix)
            record_fnn = copy.deepcopy(fnn)
            loss_list = copy.deepcopy(fnn.loss_list)
        """
        Every error trend graph will output
        Output the Error Plot to observe trend
        """
        # rel_path = './Data/Graph/' + str(i) + '_FNN_' + str(nn) + '_error_trend.png'
        # abs_path = os.path.join(os.path.dirname(__file__), rel_path)
        # ErrorPlot.error_trend(
        #     str(i) + '_FNN_' + str(nn) + '_error_trend', len(fnn.error_list), fnn.error_list, abs_path)

    print('<---Train the FNN' + str(nn) + ' Successfully--->')
    print('<----------------------------------------------->')

    # print('1_directory:', os.getcwd())

    # The first time, the output folder needs to be created
    if nn == 1:
        org_path = './Data/Graph/'
        makedir(org_path, algorithm)
    # else:
    #     os.chdir('./Data/Graph/' + dimension_reduce_algorithm)
    # print('2_directory:', os.getcwd())

    # Choose the best FNN to Plot error trend
    # rel_path = org_path + 'Best_FNN_' + str(nn) + '_error_trend.png'
    # abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    abs_path = os.getcwd() + '\\Best_FNN_' + str(nn) + '_error_trend.png'
    # print('ErrorPlot', abs_path)
    ErrorPlot.error_trend('Best_FNN_' + str(nn) + '_error_trend',
                          len(record_fnn.error_list), record_fnn.error_list,
                          abs_path)

    abs_path = os.getcwd() + '\\Best_FNN_' + str(nn) + '_loss_trend.png'
    # Choose the best FNN to Plot loss on every epoch
    # ErrorPlot.loss_trend(
    #     'Best_FNN_' + str(nn) + '_loss_trend', len(loss_list), loss_list, abs_path)

    # Choose the best Accuracy to Plot
    # rel_path = org_path + 'Accuracy vs FNN' + str(nn) + '.png'
    # abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    abs_path = os.getcwd() + '\\Accuracy vs FNN' + str(nn) + '.png'
    # print('AccuracyPlot', abs_path)
    AccuracyPlot.build_accuracy_plot(
        'Accuracy vs FNN' + str(nn),
        np.array([i for i in range(1,
                                   len(all_nn_accuracy) + 1, 1)]),
        all_nn_accuracy, abs_path)

    return nn_mean, nn_stddev, nn_weight, accuracy, matrix
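# Usage sketch (assumption): train the six per-class FNNs and collect their best
# parameters into the attribute dict consumed by get_fnn_output in example #3;
# 'LDA' as the reduction algorithm name is only an illustration.
fnn_attribute = {'Mean': [], 'Stddev': [], 'Weight': []}
for nn in range(1, 7):
    best_mean, best_stddev, best_weight, best_acc, best_matrix = train_local_fnn(nn, 'LDA')
    fnn_attribute['Mean'].append(best_mean)
    fnn_attribute['Stddev'].append(best_stddev)
    fnn_attribute['Weight'].append(best_weight)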
Example #8
# print('org_label.shape', org_label)

output_array = np.array([])

# Load the test data, forward, store
for nn in nn_category:
    print('nn -> ', nn)
    rel_path = '../Experiment/Method2/FNNModel/FNN/' + str(nn) + '.json'
    abs_path = os.path.join(os.path.dirname(__file__), rel_path)
    attribute = LoadData.load_fnn_weight(abs_path)
    # print(attribute)
    mean = np.asarray(attribute['Mean'])
    stddev = np.asarray(attribute['Stddev'])
    weight = np.asarray(attribute['Weight'])
    # Test the FNN
    fnn = FNN(fnn_input_size, fnn_membership_size, fnn_rule_size,
              fnn_output_size, mean, stddev, weight, fnn_lr, 1)
    output = fnn.testing_model(org_data)
    output_array = np.append(output_array, output)

# Transpose the matrix
print(len(nn_category))
output_array = output_array.reshape(len(nn_category), -1).T
print('output_array', output_array)
print(output_array.shape)

# label encoding
y_pred = np.array([])
denominator = np.array([cluster_num[e] for e in all_label])
print('denominator', denominator)
count = [0 for _ in range(2)]
for array in output_array: