def train_model(model_name):
    """Train a small CNN on MNIST and save it under 'trained_model/<model_name>/'."""
    from tensorflow.examples.tutorials.mnist import input_data

    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

    model = CNNModel(image_size=[28, 28], char_number=10, channel=1)

    # Three identical convolution -> ReLU -> max-pool stages.
    for _ in range(3):
        model.addLayer(Convolution2D(size=[5, 5], features=16))
        model.addLayer(ReLU())
        model.addLayer(MaxPool(size=[2, 2]))

    # Classifier head: one fully-connected layer followed by softmax output.
    model.addLayer(FullyConnected(features=10))
    model.addOutputLayer(Softmax())

    model.train(
        dataset=mnist,
        eval_every=5,
        epochs=500,
        evaluation_size=500,
        batch_size=100,
        optimizer=train.MomentumOptimizer(0.005, 0.9))

    model.test(mnist)

    # Checkpoint prefix: trained_model/<name>/<name>
    model.save("trained_model/" + model_name + "/" + model_name)
# --- Example #2 ---
    def train(self,
              dataset,
              eval_every=5,
              epochs=500,
              evaluation_size=500,
              batch_size=100,
              optimizer=None):
        """Train the model on `dataset` and plot loss/accuracy curves.

        Args:
            dataset: an object exposing `train.next_batch(n)` and
                `test.next_batch(n)` (e.g. the TF MNIST dataset helper).
            eval_every: evaluate and record metrics every this many steps.
            epochs: number of mini-batch training steps to run.
            evaluation_size: number of test samples drawn per evaluation.
            batch_size: number of training samples per step.
            optimizer: a TF optimizer; defaults to
                `train.MomentumOptimizer(0.005, 0.9)`.

        Side effects: sets `self.accuracy`, runs the TF session stored on
        this instance, prints progress, and shows two plots at the end.
        """
        # Build the default optimizer here rather than in the signature: a
        # default argument is evaluated once at class-definition time, which
        # would create a TF op at import and share it across every call.
        if optimizer is None:
            optimizer = train.MomentumOptimizer(0.005, 0.9)

        train_step = optimizer.minimize(self.loss)
        prediction = argmax(self.model, 1, name="prediction")
        result = equal(argmax(self.labels, 1), prediction, name="result")
        self.accuracy = reduce_mean(cast(result, float32), name="accuracy")
        train_loss = []
        train_accuracy = []
        test_accuracy = []

        global_variables_initializer().run()

        for i in range(epochs):
            # Draw batch_size images from the training set.
            train_batch = dataset.train.next_batch(batch_size)
            train_dict = {
                self.inputs: train_batch[0],
                self.labels: train_batch[1]
            }
            if i % eval_every == 0:

                # Evaluate on the test set every eval_every steps.
                test_batch = dataset.test.next_batch(evaluation_size)
                temp_train_loss = self.loss.eval(feed_dict=train_dict)
                temp_train_accuracy = self.accuracy.eval(feed_dict=train_dict)
                temp_test_accuracy = self.accuracy.eval(
                    feed_dict={
                        self.inputs: test_batch[0],
                        self.labels: test_batch[1]
                    })

                print(
                    'Epoch # %d, Train Loss: %g. Train Accuracy (Test Accuracy): %g (%g)'
                    % (i, temp_train_loss, temp_train_accuracy,
                       temp_test_accuracy))

                # Record the values for the plots drawn below.
                train_loss.append(temp_train_loss)
                train_accuracy.append(temp_train_accuracy)
                test_accuracy.append(temp_test_accuracy)

            # Run one optimization step on the current batch.
            self.sess.run(train_step, feed_dict=train_dict)

        # Show plots
        loss_per_epoch(epochs, eval_every, train_loss)
        train_test_accuracy(epochs, eval_every, train_accuracy, test_accuracy)
    def train(self, cases, labels):
        """Train the network on `cases`/`labels` with an adaptive learning rate.

        Runs up to self.init_temp['max_itera'] momentum-SGD steps. Every other
        step a baseline loss is recorded; on the following step the new loss is
        compared against it: the learning rate grows by dg_adaptive when the
        loss improved and shrinks otherwise. Training stops early when the UI
        'running_global' flag is cleared, the configured precision is reached,
        or (with fix_auto) progress stalls more than 8 times. The session is
        saved to self.ckpt_path when the loop ends.
        """
        limit = self.init_temp['max_itera']
        # NOTE(review): the 1e-8 factor presumably rescales a UI-entered value
        # into a usable learning rate — confirm against the caller.
        learn_rate = self.init_temp['dg_initial'] * 0.00000001
        # Loss: sum of squared errors per sample, averaged over the batch.
        self.loss = reduce_mean(
            reduce_sum(square((self.label_layer - self.output_layer)),
                       reduction_indices=[1]))

        # Feed the learning rate through a placeholder so it can be changed
        # from step to step without rebuilding the optimizer.
        learning_rate = placeholder(float32, shape=[])
        #self.train_step = #tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(self.loss)
        self.train_step = train.MomentumOptimizer(learning_rate=learning_rate,
                                                  momentum=0.9,
                                                  use_locking=False,
                                                  use_nesterov=True).minimize(
                                                      self.loss)  # momentum optimizer
        self.session.run(global_variables_initializer())

        next_done = 0  # 1 => a baseline loss0 is pending comparison
        starttime = datetime.now()
        for i in range(limit):
            self.session.run(self.train_step,
                             feed_dict={
                                 self.input_layer: cases,
                                 self.label_layer: labels,
                                 learning_rate: learn_rate
                             })

            if i % 2 == 0:
                # Allow the front end to cancel training via a global flag.
                if not GlobalMap().get('running_global'):
                    break
                next_done = 1
                next_times = i + 1
                train_0 = self.train_step
                # Baseline loss before the next step, for comparison below.
                loss0 = self.session.run(self.loss,
                                         feed_dict={
                                             self.input_layer: cases,
                                             self.label_layer: labels
                                         })

            # Update the front end and adapt the learning rate.
            if next_done == 1 and i % next_times == 0:
                loss1 = self.session.run(self.loss,
                                         feed_dict={
                                             self.input_layer: cases,
                                             self.label_layer: labels
                                         })
                GlobalMap().set(loss_now=loss0,
                                times=i,
                                used_time=(datetime.now() - starttime).seconds)
                # Stop once the requested precision has been reached.
                if loss1 < self.init_temp['precision']:
                    break

                # Every 50 steps, with auto-fix enabled, count a stall when the
                # loss barely improved (or went NaN); give up after 8 stalls.
                if (i + 1) % 50 == 0 and self.init_temp['fix_auto'] and (
                        loss0 - loss1 < 0.1 or isnan(loss1)):
                    self.auto_times += 1
                    #print(self.auto_times)
                    GlobalMap().set(progress_num=5 + i * 100 // limit +
                                    10)  # progress bar
                    if self.auto_times > 8:
                        break

                if loss1 < loss0:  # adjust the learning rate on top of momentum
                    learn_rate *= 1.0 + self.init_temp['dg_adaptive']
                else:
                    learn_rate *= 1.0 - self.init_temp['dg_adaptive']
                    # NOTE(review): train_0 is the same op object as
                    # self.train_step (it is never reassigned in between), so
                    # this restore looks like a no-op — confirm intent.
                    self.train_step = train_0
                    #print("1:", loss1)
                #print("shu:",learn_rate)
                next_done = 0
        # NOTE(review): `self.saver.Saver()` is unusual — typically this would
        # be `tf.train.Saver().save(...)` or `self.saver.save(...)`; verify
        # what `self.saver` holds.
        self.saver.Saver().save(self.session, self.ckpt_path)