def train_dnf_network(data, targets, q):
    # Train a DNF-shaped network (a noisy-AND hidden layer feeding a noisy-OR
    # output) and put its training error rate on the queue q.
    # NOTE: n (the number of input features) is expected to be defined in the
    # enclosing scope.
    res = MultiOutLNN.train_lnn(
        data, targets, 5000 * len(data), len(data[0]), [2**n], 1,
        [MultiOutLNN.noisy_and_activation, MultiOutLNN.noisy_or_activation],
        False)
    wrong = MultiOutLNN.run_lnn(
        data, targets, res,
        [MultiOutLNN.noisy_and_activation, MultiOutLNN.noisy_or_activation],
        False)
    er = float(wrong) / float(len(data))
    q.put(er)
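# Usage sketch (an assumption, not from the original scripts): train_dnf_network
# reports its error rate via q.put, which suggests it is meant to be launched as
# a worker process with a multiprocessing.Queue. Here `data`, `targets` and `n`
# are assumed to be defined earlier in the calling script.
import multiprocessing as mp

q = mp.Queue()
worker = mp.Process(target=train_dnf_network, args=(data, targets, q))
worker.start()
worker.join()
print("DNF training error rate: ", q.get())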
X_test = np.array(test[0])
Y_test = np.array(test[1])

num_inputs = len(X_train[0])
num_outputs = len(Y_train[0])

## CONFIGURATION ##
hidden_layers = [4]
activations = [
    MultiOutLNN.noisy_or_activation, MultiOutLNN.noisy_and_activation
]
iterations = len(X_train) * 30
## ## ## ## ## ## ##

res = MultiOutLNN.train_lnn(X_train, Y_train, iterations, num_inputs,
                            hidden_layers, num_outputs, ["OR", "AND"],
                            activations, True)

#rule = MultiOutLNN.ExtractRules(len(X_train[0]), res, ["OR", "AND"])
#print(len(rule))
#for i in range(len(rule)):
#    print(i)
#    print(rule[i])
#    print()

print("Training")
print("Total Number Of Samples: ", len(X_train))
print(
    "Network Wrong: ",
    MultiOutLNN.run_lnn(X_train, Y_train, res, num_inputs, hidden_layers,
                        num_outputs, activations, True))
import os

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

import MultiOutLNN

dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path + "/")
print(tf.gfile.Exists(dir_path + "/"))

# Load MNIST and a previously trained network, extract fuzzy rules, and
# evaluate them on the test set.
mnist = input_data.read_data_sets(dir_path + "/", one_hot=True)
X_train = mnist.train.images
Y_train = mnist.train.labels
X_test = mnist.test.images
Y_test = mnist.test.labels

network = np.load('network.npy')
rules = MultiOutLNN.ExtractFuzzyRules(len(X_train[0]), network, ["OR", "AND"],
                                      0.5, 2)
print()
print()
for i in range(len(rules)):
    print(i)
    print(rules[i])
    print()

##wrong = MultiOutLNN.test_fuzzy_rules(rules, X_train, Y_train)
##er = wrong/len(X_train)
##train_rule_wrong = er
##print("Rules Error Rate: ", er)

wrong = MultiOutLNN.test_fuzzy_rules(rules, X_test, Y_test)
er = wrong / len(X_test)
# Tail of split_data: route each example into the training or testing split
# depending on whether its index i was drawn into idx.
        if i in idx:
            X_train.append(data[i])
            Y_train.append(targets[i])
        else:
            X_test.append(data[i])
            Y_test.append(targets[i])
    return [np.array(X_train), np.array(Y_train)], \
           [np.array(X_test), np.array(Y_test)]


examples, targets = VehicleData.read_data()
train, test = split_data(examples, targets, 0.7)

# One activation per layer: 19 noisy-OR hidden layers feeding a noisy-AND output.
activations = [MultiOutLNN.noisy_or_activation] * 19 + \
              [MultiOutLNN.noisy_and_activation]

net = MultiOutLNN.train_lnn(train[0], train[1], 500 * len(train[0]),
                            len(train[0][0]),
                            [60] * 11 + [30] * 8, 4, activations, True)
print(len(train[0][0]))
print(train[0][0])

rules = MultiOutLNN.ExtractFuzzyRules(len(train[0][0]), net,
                                      ['OR'] * 19 + ['AND'], 0.1, 1)
print()
print()
for i in range(len(rules)):
    print(i)
    #print(len(rules[i]))
    print()

print("Training Set")
wrong = MultiOutLNN.run_lnn(train[0], train[1], net, activations, True)
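# Hypothetical sketch (not part of the original file) of the full split_data
# helper implied by the fragment above; the original header and the
# construction of idx are not shown in this excerpt. Here idx is assumed to be
# a random sample of ratio * len(data) indices reserved for training.
import numpy as np


def split_data(data, targets, ratio):
    idx = set(np.random.choice(len(data), int(len(data) * ratio),
                               replace=False))
    X_train, Y_train, X_test, Y_test = [], [], [], []
    for i in range(len(data)):
        if i in idx:
            X_train.append(data[i])
            Y_train.append(targets[i])
        else:
            X_test.append(data[i])
            Y_test.append(targets[i])
    return [np.array(X_train), np.array(Y_train)], \
           [np.array(X_test), np.array(Y_test)]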
cnf_errors = []
dnf_errors = []
pcep_errors = []
cnf_rule_errors = []
dnf_rule_errors = []

# Perform leave-one-out (LOO) cross-validation: hold out example i, train the
# CNF, DNF and perceptron networks on the rest, then extract rules from the
# trained logical networks.
for i in range(0, len(data)):
    print(i, " : ", len(data))
    data_p = data[np.arange(len(data)) != i]
    targets_p = targets[np.arange(len(data)) != i]

    print("Training Error")
    cnf = MultiOutLNN.train_lnn(
        data_p, targets_p, 100000, len(data[0]), [2**len(data[0])],
        len(targets[0]),
        [MultiOutLNN.noisy_or_activation, MultiOutLNN.noisy_and_activation],
        True)
    #train_cnf_network(6, data_p, targets_p, 100000 * 0, 3)
    #print("CNF: ", cnf[2])

    dnf = MultiOutLNN.train_lnn(
        data_p, targets_p, 100000, len(data[0]), [2**len(data[0])],
        len(targets[0]),
        [MultiOutLNN.noisy_and_activation, MultiOutLNN.noisy_or_activation],
        True)
    #train_dnf_network(6, data_p, targets_p, 100000 * 0, 3)
    #print("DNF: ", dnf[2])

    pcep = MultiOutNN.train_lnn(data_p, targets_p, 100000, len(data[0]), [30],
                                len(targets[0]), True)
    #print("PCEP: ", pcep[2])

    cnf_rule = MultiOutLNN.ExtractRules(len(data[0]), cnf, ["OR", "AND"])
    dnf_rule = MultiOutLNN.ExtractRules(len(data[0]), dnf, ["AND", "OR"])
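# Hypothetical continuation (the book-keeping code is not part of this
# excerpt): the error lists initialized above would presumably be filled by
# scoring the held-out example i with each trained model and summarized after
# the loop, e.g.
#
#     cnf_errors.append(MultiOutLNN.run_lnn(data[i:i + 1], targets[i:i + 1],
#                                           cnf, ...))
#     ...
#     print("CNF mean LOO error: ", np.mean(cnf_errors))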
# Tail of split_data: assign each example to the training or testing split.
            Y_train.append(targets[i])
        else:
            X_test.append(data[i])
            Y_test.append(targets[i])
    return X_train, Y_train, X_test, Y_test


acts = [MultiOutLNN.noisy_or_activation, MultiOutLNN.noisy_and_activation]
acts_name = ["OR", "AND"]  # layer names in the same order as the activations

targets, examples = read_data()
X_train, Y_train, X_test, Y_test = split_data(examples, targets, 0.7)

res = MultiOutLNN.train_lnn(np.array(X_train), np.array(Y_train), 600000,
                            len(examples[0]), [60], 3, acts_name, acts, False)

##rule = LNNWithNot.ExtractRules(len(examples[0]), res, acts_name)
##print(rule)
##print("-- Evaluating --")
##print("Training Set, Size = ", len(X_train))

print(
    "Network Wrong: ",
    MultiOutLNN.run_lnn(X_train, Y_train, res, len(examples[0]), [60], 3, acts,
                        False))

##print("Rule Wrong: ", LNNWithNot.test(rule[0], X_train, Y_train))
##print()
##print("Testing Set, Size = ", len(X_test))
##print("Network Wrong: ", LNNWithNot.run_lnn(X_test, Y_test, res, len(examples[0]), [30], 1, acts))
##print("Rule Wrong: ", LNNWithNot.test(rule[0], X_test, Y_test))
X_train, Y_train, X_test, Y_test = split_data(data, targets, 0.7)

# Keep at most 55000 samples and scale pixel values into [0, 1].
X_train = X_train[0:55000] / 255
X_test = X_test[0:55000] / 255

num_inputs = len(X_train[0])
num_outputs = len(Y_train[0])

## CONFIGURATION ##
hidden_layers = []
activations = [MultiOutLNN.noisy_and_activation]
iterations = len(X_train) * 30
## ## ## ## ## ## ##

res = MultiOutLNN.train_lnn(X_train, Y_train[0:55000], iterations, num_inputs,
                            hidden_layers, num_outputs, activations, True)

#rule = MultiOutLNN.ExtractRules(len(X_train[0]), res, ["OR", "AND"])
#print(len(rule))
#for i in range(len(rule)):
#    print(i)
#    print(rule[i])
#    print()

print("Training")
print("Total Number Of Samples: ", len(X_train))
print(
    "Network Error Rate: ",
    MultiOutLNN.run_lnn(X_train, Y_train[0:55000], res, activations, True) /
    len(X_train))
import SPECTData

import sys
sys.path.append('../../lib/')

import numpy as np
import MultiOutLNN

# Train a DNF-shaped network (a noisy-AND hidden layer feeding a noisy-OR
# output) on the SPECT data and report the testing error rate.
train, test = SPECTData.read_data()

activations = [MultiOutLNN.noisy_and_activation,
               MultiOutLNN.noisy_or_activation]

net = MultiOutLNN.train_lnn(train[0], train[1], 1000 * len(train[0]),
                            len(train[0][0]), [30], 1, activations, True)

wrong = MultiOutLNN.run_lnn(test[0], test[1], net, activations, True)
er = wrong / len(test[0])
print(er)
if not os.path.exists("./" + name):
    os.makedirs(name)

# Create (or truncate) the output file for this experiment.
f = open("./{}/{}".format(name, out), 'w+')
f.close()

print()
print("Starting Experiment")
#send_notification_via_pushbullet("Experiment ({} : {}): {}".format(job_id, task_id, name), "Starting")

for i in range(num):
    gc.collect()
    print("Starting Iteration: ", i)

    with tf.Graph().as_default():
        res = MultiOutLNN.train_lnn(X_train, Y_train, iterations, num_inputs,
                                    layers, num_outputs, activations, add_not,
                                    lsm)
        training_wrong = MultiOutLNN.run_lnn(X_train, Y_train, res, num_inputs,
                                             layers, num_outputs, activations,
                                             add_not)
        testing_wrong = MultiOutLNN.run_lnn(X_test, Y_test, res, num_inputs,
                                            layers, num_outputs, activations,
                                            add_not)
    tf.reset_default_graph()

    training_error_rate = training_wrong / len(X_train)
    testing_error_rate = testing_wrong / len(X_test)
    print("Training Error Rate: {:.2%}".format(training_error_rate))
    print("Testing Error Rate: {:.2%}".format(testing_error_rate))
import os

import numpy as np

import ReadLensesData
import ConvertData
import MultiOutLNN


def to_one_hot(val, m):
    # Map a 1-based class label onto an m-dimensional one-hot vector.
    res = np.zeros(m)
    res[val - 1] = 1
    return res


data, targets = ReadLensesData.read_data()
data = np.array(data)
targets = np.array([to_one_hot(v, 3) for v in targets])

res = MultiOutLNN.train_lnn(data, targets, int(140000 * 1), len(data[0]), [30],
                            len(targets[0]),
                            [MultiOutLNN.noisy_and_activation,
                             MultiOutLNN.noisy_or_activation], True)

rule = MultiOutLNN.ExtractRules(len(data[0]), res, ["AND", "OR"])
print(len(rule))
for i in range(len(rule)):
    print(i)
    print(rule[i])
    print()

print(data)
print(targets)
print(MultiOutLNN.test(rule, data, targets))
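# Quick illustration (not part of the original script) of the one-hot encoding
# defined above: class label 2 out of 3 classes becomes [0, 1, 0].
print(to_one_hot(2, 3))  # -> [0. 1. 0.]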
import tensorflow as tf
import random
import numpy as np
import matplotlib.pyplot as plt

import sys
sys.path.append('../../lib/')
import MultiOutLNN

network = np.load('network.npy')
rules = MultiOutLNN.ExtractFuzzyRules(8, network, ['AND', 'OR'], 0.1, 2, False)
print()
print()
for i in range(len(rules)):
    print(i)
    print(rules[i])
    print()
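# Assumption (not shown in these excerpts): 'network.npy' was presumably saved
# after training with something along the lines of
#
#     net = MultiOutLNN.train_lnn(...)  # trained elsewhere
#     np.save('network.npy', net)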
# Augment each example with the negation of its features (x and 1 - x), so the
# network can form rules over both a feature and its complement.
for i in range(len(data)):
    #print()
    #print(data[i])
    ex = np.concatenate([data[i], 1 - data[i]])
    #print(ex)
    data_prime.append(ex)
data_prime = np.array(data_prime)

res = []
for i in range(1):
    print("Experiment: ", i, end=' ')
    X_train, Y_train, X_test, Y_test = split_data(data_prime, targets, 0.7)
    net = MultiOutLNN.train_lnn(X_train, Y_train, iterations, 8,
                                np.copy(hidden_layers).tolist(), 3,
                                activations, False)
    wrong = MultiOutLNN.run_lnn(X_test, Y_test, net, activations, False)
    er = wrong / len(X_test)
    print(" -> ", er)
    res.append(er)

rules = MultiOutLNN.ExtractFuzzyRules(len(X_train[0]), net, ['OR', 'AND'], 0.5,
                                      2, False)
print()
print()
for i in range(len(rules)):
    print(i)
    print(rules[i])
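# Equivalent vectorized formulation (not from the original script, and assuming
# `data` is a 2-D NumPy array): the augmentation loop above can be replaced by
#
#     data_prime = np.hstack([data, 1 - data])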