def recognize(sett):
    '''Train and test the neural network.'''

    # Initialize a neural network instance
    nn = NN(sett.input_nodes, sett.hidden_nodes, sett.output_nodes, sett.rate)

    # Load the training data
    training_data = open("dataset/mnist_train_100.csv", 'r')
    training_list = training_data.readlines()
    training_data.close()

    # Training
    for _ in range(5):
        for record in training_list:
            all_values = record.split(',')
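            # Scale raw pixel values (0-255) into the range 0.01-1.00 so no input is exactly zero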
            inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
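            # Target vector: 0.01 everywhere except 0.99 at the index of the correct digit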
            targets = np.zeros(sett.output_nodes) + 0.01
            targets[int(all_values[0])] = 0.99
            nn.train(inputs, targets)

    # Load the test data
    test_data = open("dataset/mnist_test_10.csv", 'r')
    test_list = test_data.readlines()
    test_data.close()

    score_card = []

    # Test the network
    for record in test_list:
        all_values = record.split(',')
        correct_val = int(all_values[0])
        inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        outputs = nn.query(inputs)
        network_val = np.argmax(outputs)
        score_card.append(1 if correct_val == network_val else 0)

    # Load the input image and convert it to an array
    img_array = smc.imread("numbs/new.png", flatten=True)
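    # Invert the pixel values (255 - x) and flatten the image into a 784-element vector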
    img_data = 255.0 - img_array.reshape(784)
    img_data = (img_data / 255.0 * 0.99) + 0.01

    # Output signals
    outputs_new = nn.query(img_data)

    # The digit whose output has the highest similarity to the image
    network_val = np.argmax(outputs_new)
    return network_val
Example No. 2
def get_fitness(individual):
    # The values of an individual (its hyperparameters) uniquely define it, so their
    # concatenation into a single string is used as the cache key in fitness_stored.
    key = ''.join(str(e) for e in individual.values())
    if key in fitness_stored:
        fitness = fitness_stored[key]
    else:
        nn = NeuralNetwork(individual["activation_function"],
                           individual["number_of_neurons"],
                           individual["optimizer"], individual["dropout"],
                           individual["epoch_size"],
                           individual["learning_rate"],
                           individual["batch_size"])
        fitness = nn.build()
        # Cache the fitness of an individual that has not been evaluated before
        fitness_stored[key] = fitness
    return fitness
Example No. 3

def vp_start_gui():
    """6Starting point when module is the main routine."""
    global w, root
    root = tk.Tk()
    top = Main(Login.MySQL.get_name(), root)
    Main_support.init(root, top)
    root.mainloop()


w = None
total_gems = 0
ExcelSheet = FileHandling()
ClickPicture = ImageCapture()
ColourDetection = NeuralNetwork()
Arduino = SerialCommunication()


def create_Main(root):
    """Starting point when module is imported by another program."""
    global w, rt
    rt = root
    w = tk.Toplevel(root)
    top = Main(Login.MySQL.get_name(), w)
    Main_support.init(w, top)
    return w, top


def destroy_Main():
    global w
    # Tear down the Toplevel window created by create_Main
    w.destroy()
    w = None
Example No. 4
def create_layers():
    n_features = 28 * 28  # matches the size of the handwritten images (28x28)
    n_hidden1 = 300  # define three layers, including the output layer
    n_hidden2 = 100
    n_classes = 10  # digit classification, so the final layer has 10 outputs
    layers = []
    relu = NN_activators.ReLUActivator()
    layers.append(NN_Layer.Layer(n_features, n_hidden1, activator=relu))
    layers.append(NN_Layer.Layer(n_hidden1, n_hidden2, activator=relu))
    layers.append(NN_Layer.Layer(n_hidden2, n_classes))
    return layers


# end

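# Load the MNIST dataset with one-hot encoded labels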
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
X_train, Y_train = mnist.train.images, mnist.train.labels
X_test, Y_test = mnist.test.images, mnist.test.labels

layers = create_layers()
loss = NN_Loss.LogarithmicLoss()

model = NeuralNetwork(layers, loss)
model.fit(X_train, Y_train, 50000, 0.01)
V = model.predict(X_test)  # note: the output is a matrix with one row per test point, each row having the same dimension as a one-hot vector

PROBA = NN_Loss.softmax(V)  # obtain the softmax matrix
y_pred = np.argmax(PROBA, axis=1)

accuracy = accuracy_score(np.argmax(Y_test, axis=1), y_pred)
print("accuracy: {}.".format(accuracy))
Example No. 5
 def __init__(self, num_actions, name, tau=0.001):
     NeuralNetwork.__init__(self, num_actions, name)
     self.tau = tau
     self._associate = self._register_associate()
Example No. 6
# determine number of classes
n_class = len(np.unique(Y))

# define neural network model-1 parameters
hidden_layers = [
    5, 5
]  # hidden layer sizes are passed as a list; the i-th value is the number of neurons in the i-th layer
l_rate = 0.6  # learning rate
epochs = 1000  # number of training epochs

#check the timing of model-1
t0 = time.time()  #start time

#define model-1
model1 = NN(input_layer_size=m,
            output_layer_size=n_class,
            hidden_layer_size=hidden_layers)
model1.train(X_train, Y_train, l_rate=l_rate, n_epochs=epochs)
#end time
t1 = time.time()
total_time = t1 - t0

# predictions of the model on the training and test data
Y_train_predict = model1.predict(X_train)
Y_test_predict = model1.predict(X_test)
accuracy_train = 100 * np.sum(Y_train == Y_train_predict) / len(Y_train)
accuracy_test = 100 * np.sum(Y_test == Y_test_predict) / len(Y_test)
print("total time for training model-1:", total_time)
print("accuracy of model-1 on training data", accuracy_train)
print("accuracy of model-1 on test data", accuracy_test)
Example No. 7
    layers = []
    relu = NN_activators.ReLUActivator()
    layers.append(NN_Layer.Layer(n_inputs, n_hidden1, activator=relu))
    layers.append(NN_Layer.Layer(n_hidden1, n_hidden2, activator=relu))
    layers.append(NN_Layer.Layer(n_hidden2, n_outputs))
    return layers


# end

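# Load the California housing dataset (regression target: median house value)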
housing = fetch_california_housing()
X = housing.data
y = housing.target.reshape(-1, 1)

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.5,
                                                    random_state=0)

X_train = process_features(X_train)
X_test = process_features(X_test)

layers = create_layers()
loss = NN_Loss.SquaredLoss()  # regression problem, so the loss is defined as the squared loss

model = NeuralNetwork(layers, loss)
model.fit(X_train, y_train, 100000, 0.01)
y_pred = model.predict(X_test)

print("r2_score: {}.".format(r2_score(y_test, y_pred)))
Example No. 8
 def __initData(self):
     self.__paintBoard = PaintBoard(self)
     self.__model = NeuralNetwork()
Example No. 9
class MainWidget(QWidget):
    def __init__(self, Parent=None):
        super().__init__(Parent)
        self.__result = -1
        self.__initData()
        self.__initView()

    def __initData(self):
        self.__paintBoard = PaintBoard(self)
        self.__model = NeuralNetwork()

    def __initView(self):
        self.setFixedSize(600, 400)
        self.setWindowTitle('Application')

        main_layout = QHBoxLayout(self)
        main_layout.setSpacing(10)
        main_layout.addWidget(self.__paintBoard)

        sub_layout = QVBoxLayout()
        sub_layout.setContentsMargins(10, 10, 10, 10)
        sub_layout.setSpacing(30)

        self.__btn_Clear = QPushButton('clear')
        self.__btn_Clear.setParent(self)
        self.__btn_Clear.clicked.connect(self.__paintBoard.clear)
        sub_layout.addWidget(self.__btn_Clear)

        self.__btn_Predict = QPushButton('predict')
        self.__btn_Predict.setParent(self)
        self.__btn_Predict.clicked.connect(self.predict)
        sub_layout.addWidget(self.__btn_Predict)

        self.__btn_Quit = QPushButton('quit')
        self.__btn_Quit.setParent(self)
        self.__btn_Quit.clicked.connect(self.quit)
        sub_layout.addWidget(self.__btn_Quit)

        self.__lb_Result_Tip = QLabel()
        font = QFont()
        font.setPointSize(24)
        self.__lb_Result_Tip.setFont(font)

        self.__lb_Result_Tip.setText('result')
        self.__lb_Result_Tip.setParent(self)
        sub_layout.addWidget(self.__lb_Result_Tip)

        self.__lb_Result = QLabel()
        font = QFont()
        font.setPointSize(30)
        self.__lb_Result.setFont(font)
        self.__lb_Result.setParent(self)
        self.__lb_Result.setAlignment(Qt.AlignHCenter)
        sub_layout.addWidget(self.__lb_Result)

        main_layout.addLayout(sub_layout)

    def quit(self):
        self.close()

    def predict(self):
        image = self.__paintBoard.getImage()
        pil_img = ImageQt.fromqimage(image)
        pil_img = pil_img.resize((28, 28), Image.ANTIALIAS)
        # pil_img.save('./images/test66.png')
        # pil_img.show()

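        # Convert the drawing to grayscale ('L') and flatten it to a 1x784 row vector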
        img_array = np.array(pil_img.convert('L')).reshape(1, 784)
        # img_array = np.hstack([img_array, [1.0]]).reshape((1, 785))

        # display image
        plt.imshow(img_array.reshape(28, 28), cmap="binary")
        # plt.imshow(pil_img, cmap="binary")
        plt.show()
        # fig = plt.figure(figsize=(6, 6))
        # fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
        # # draw the digits: each image is 8*8 pixels
        # for i in range(64):
        #     ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
        #     ax.imshow(self.xtest[i].reshape(28, 28), cmap=plt.cm.binary, interpolation='nearest')
        #     # label each image with its target value
        #     ax.text(0, 7, str(self.ytest[i]))
        # plt.show()

        self.__result = self.__model.predict(img_array)
        print("result: %d" % self.__result)
        self.__lb_Result.setText("%d" % self.__result)
Example No. 10
        all_values = record.split(',')

        correct_label = int(all_values[0])

        inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01

        outputs = nn.query(inputs)

        label = np.argmax(outputs)

        if label == correct_label:
            score.append(1)
        else:
            score.append(0)

    score_a = np.asarray(score)

    print("performance = {:.2f}%".format((score_a.sum() / score_a.size) * 100))


#Main
input_nodes = 784
hidden_node = 200
output_nodes = 10

learning_rate = 0.1

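# Build the network: 784 input nodes, 200 hidden nodes, 10 output nodes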
nn = NeuralNetwork(input_nodes, hidden_node, output_nodes, learning_rate)

train(nn)
Example No. 11
    layers.append(NN_Layer.Layer(n_hidden1, n_hidden2, activator=relu))
    layers.append(NN_Layer.Layer(n_hidden2, n_hidden3, activator=relu))
    layers.append(NN_Layer.Layer(n_hidden3, n_classes, activator=ide))
    return layers
# end



X, y = get_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)

encoder = MinMaxScaler()
X_train = encoder.fit_transform(X_train)
X_test = encoder.transform(X_test)  # reuse the scaler fitted on the training data

encoder = OneHotEncoder()
Y_train = encoder.fit_transform(y_train)  # only the training labels are converted into a matrix of one-hot vectors

layers = create_layers()
loss = NN_Loss.LogarithmicLoss()  # classification problem: logarithmic loss (cross-entropy objective)

model = NeuralNetwork(layers, loss)
model.fit(X_train, Y_train, 70000, 0.02)
VR = model.predict(X_test)  # note: the output is a matrix with one row per test point, same dimension as a one-hot vector; the rows may not be valid probabilities

PROBA = NN_Loss.softmax(VR)  # pass the VR matrix through softmax so each row forms a valid probability distribution
y_pred = np.argmax(PROBA, axis=1)  # take the class with the highest predicted probability as the final prediction (a single digit, 0-9)

accuracy = accuracy_score(y_test, y_pred)  # compute the accuracy
print("accuracy: {}.".format(accuracy))
Example No. 12
	def __init__(self):
		self.gameBoard = chess.Board()
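		# Hard-coded 10x10 matrix of values that defines the network's layers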
		self.layers = [[0.980387919287529, 0.6474419063404472, 0.16792691161694762, 0.5769313802043031, 0.6706039285245391, 0.6280155611233161, 0.09935413521220327, 0.40539108950651337, 0.19437617970540655, 0.541066132818385], [0.3473451298742617, 0.8501141804573936, 0.7824274160843238, 0.052648592718530285, 0.42327269978042137, 0.7016450673421375, 0.42626590505168294, 0.33359577103619875, 0.07622579086979042, 0.985727647912295], [0.88931906278011, 0.628743713922838, 0.3981540274891813, 0.468891668222183, 0.691167298136054, 0.06297249411252137, 0.8364138553623081, 0.8479942978770231, 0.32644240807468683, 0.5599095148494678], [0.9986756673045996, 0.46573288240599653, 0.461357607683318, 0.346827106267659, 0.3449875232567178, 0.9954455721205505, 0.46253991983568876, 0.29721978842083807, 0.0950957507425565, 0.03420451491888987], [0.5832844201502072, 0.2370776982800903, 0.9279835018739888, 0.5827135815362862, 0.618530862583544, 0.27904930899243185, 0.82650238979006, 0.8983562631841708, 0.4817792176426975, 0.21735943796434265], [0.15191366101282067, 0.07364579894623668, 0.93869335169828, 0.01935727967136902, 0.702977726136353, 0.7623065807489392, 0.19301101912084462, 0.6607615734264356, 0.02612338496723221, 0.8785271321760161], [0.9062570844987892, 0.2227967058097512, 0.16381440720404794, 0.5181676821854615, 0.9337316438822966, 0.5111125797385804, 0.7988188249341194, 0.5833271374111773, 0.08450548746700881, 0.9078924716631417], [0.43048413322771306, 0.11813628971949208, 0.31569316762605526, 0.4894631574029519, 0.43483676314126607, 0.7092020797311152, 0.47170672101350375, 0.26395016083936573, 0.35220847239627506, 0.24797995656383398], [0.3116708634315597, 0.0029044515964246065, 0.0993391492425112, 0.05894437485358994, 0.06880635746070796, 0.594728349028283, 0.4604875698409675, 0.8972244022860567, 0.8510384632859578, 0.15768611881618033], [0.46841518613203037, 0.1082643910672737, 0.6877236667119417, 0.5183231593413817, 0.2989809779548921, 0.6133323575487193, 0.6165556381998286, 0.24365923346260732, 0.7678036741674775, 0.37120917165465794]]
		self.nn  = NeuralNetwork(self.layers)