Beispiel #1
0
def main():
    """Alternate training and testing of the DeepCNet MNIST model until
    the persisted best test accuracy reaches 0.999."""
    # Hyper-parameters for this run.
    hps = HParams(
        batch_nums=50, num_classes=10, deep_net_fkn=20, img_depth=1,
        img_width=28, des_img_size=96, filter_in_channel=1)

    # Initialise the persisted config ("peizhi") file with defaults.
    ModelUtilv3s1.init_peizhi(peizhifilename=peizhi_filename,
                              peizhidict=peizhi_dict)

    while True:
        print("start training")
        startTrain(hps, mode='train')
        print("training end")

        print("start test")
        startTest(hps, mode='test')
        print("test end")

        # Re-read the config; startTest updates 'max_test_acc' in it.
        with open(peizhi_filename, mode='rb') as rfobj:
            peizhi = pickle.load(rfobj)

        best = peizhi['max_test_acc']
        if best >= 0.999:
            print("already over best test acc, now test acc is ", best)
            break
Beispiel #2
0
    def create_deep_res_body(
            self,batch_num, carriage_block_num, inputs, is_training_ph,
            layername="layer", activateFunc=tf.nn.relu, stride=[1, 1, 1, 1]):
        """Build the residual "body" of the network.

        For each entry of ``carriage_block_num``: max-pool the current
        feature map, append a carriage of building blocks, then a
        "desc" block (presumably reducing width by
        ``self.hps.descrate[it]`` — TODO confirm against
        ``ModelUtilv3s1.building_block_desc``).

        Original note: the final output is 4*4*deep_net_fkn*(2**4).

        :param batch_num: batch size forwarded to the carriage builder.
        :param carriage_block_num: list; entry ``it`` is the number of
            building blocks in carriage ``it``.
        :param inputs: 4D input tensor.
        :param is_training_ph: bool placeholder toggling BN train mode.
        :param layername: name prefix for created layers.
        :param activateFunc: activation used inside the blocks.
        :param stride: convolution stride, default ``[1, 1, 1, 1]``.
        :return: output tensor of the final carriage.
        """
        kernalWidth = 3

        outputs = inputs
        for it in range(len(carriage_block_num)):
            # Earlier variants pooled differently per carriage:
            # if it == 0:
            #     outputs = ModelUtilv3s1.add_overlap_maxpool(outputs)
            # else:
            #     outputs = ModelUtilv3s1.add_averagepool_layer(outputs)

            # Down-sample spatially before each carriage.
            outputs = ModelUtilv3s1.add_maxpool_layer(outputs)


            tscope = "carriage_" + str(it)
            outputs = ModelUtilv3s1.add_building_block_carriage(
                batch_num, self.hps.deepk[it], carriage_block_num[it], outputs,
                kernalWidth, is_training_ph,
                scope=tscope, layername=layername, activateFunc=activateFunc,
                stride=stride)
            tscope = "carriage" + str(it) + "des"
            outputs = ModelUtilv3s1.building_block_desc(
                outputs, is_training_ph, scope=tscope, layername=layername,
                activateFunc=activateFunc, stride=stride, descrate=self.hps.descrate[it])
            # Debug aid:
            # print outputs.get_shape().as_list()
        return outputs
def startTest(hps, mode, gps, msg):
    """Evaluate the current checkpoint on the full MNIST test set.

    Restores variables from ``gps.save_file_name``, accumulates correct
    predictions over all 10,000 test images, decays the learning rate
    via ``ModelUtilv3s1.down_learning_rate`` and writes the new rate and
    test step back into the config ("peizhi") pickle. On a new best
    accuracy the checkpoint directory is archived and a notification
    e-mail is sent.

    :param hps: HParams with batch and image sizes.
    :param mode: model mode string (expected 'test').
    :param gps: GParams with checkpoint and config paths.
    :param msg: run-description string embedded in the notification mail.
    """
    # images, labels = mnist.test.next_batch(hps.batch_nums)
    tf.reset_default_graph()

    xp = tf.placeholder(
        tf.float32,
        [hps.batch_nums, hps.des_img_size, hps.des_img_size, hps.img_depth])
    yp = tf.placeholder(tf.float32, [None, 10])

    model = mnistModel.MnistModel(hps, xp, yp, mode)
    model.create_graph()

    # Number of steps for one full pass over the 10,000 test images.
    epochTrainNums = int(10000 // hps.batch_nums)
    allrightnums = 0
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()

        saver.restore(sess=sess, save_path=gps.save_file_name)

        # NOTE: despite the .xml extension, the config file is a pickle.
        with open(gps.peizhi_filename, mode='rb') as rfobj:
            peizhi = pickle.load(rfobj)
            lrn_rate = peizhi['lrn_rate']

        base_step = peizhi['test_step']
        end_step = int(base_step + epochTrainNums)
        for itstep in range(base_step, end_step):
            images, labels = mnist.test.next_batch(hps.batch_nums)
            images = batch_imgs_process_test(images, hps)

            feed_dict = {xp: images, yp: labels, model.is_training_ph: False}
            (inlabels,
             outprediction) = sess.run([model.labes, model.predictions],
                                       feed_dict=feed_dict)
            itrightnums = ModelUtilv3s1.get_test_right_num(
                outprediction, inlabels)
            allrightnums += itrightnums

        test_acc = float(allrightnums) / 10000
        # Decay the learning rate based on the new accuracy and persist it.
        lrn_rate = ModelUtilv3s1.down_learning_rate(test_acc, lrn_rate)
        ModelUtilv3s1.update_peizhi(gps.peizhi_filename, 'lrn_rate', lrn_rate)
        ModelUtilv3s1.update_peizhi(gps.peizhi_filename, 'test_step', end_step)

        if test_acc > peizhi['max_test_acc']:
            # New best: record it and archive the checkpoint directory.
            ModelUtilv3s1.update_peizhi(gps.peizhi_filename, 'max_test_acc',
                                        test_acc)
            ModelUtilv3s1.move_variable_from_src2des(gps.save_dirname,
                                                     gps.des_save_dirname)

            # Credentials/recipient appear anonymized in this snippet.
            email = coreMailUtil.Email('xxx', 'xxx@')
            to_list = ['*****@*****.**']
            content = msg + "<p>max_acc:" + str(test_acc) + "</p>"
            email.send_mail_html(to_list, 'OCR latest acc', content)

        msg = "test acc:%.5f         now learning rate:%f" % (test_acc,
                                                              lrn_rate)

        logger.showAndLogMsg(msg)
def main():
    """Run train/test cycles for the compressed-ResNet MNIST model until
    the target best test accuracy (0.999) is reached."""
    hps = HParams(
        batch_nums=50, num_classes=10, deep_net_fkn=30, img_depth=1,
        img_width=28, deepk=[3, 2.9, 2.8, 2.8],
        carriage_block_num=[2, 2, 2, 2], des_img_size=96,
        descrate=[0.6, 0.6, 0.6, 0.6])

    # Checkpoint and persisted-config locations.
    gps = GParams(
        save_file_name='/home/allen/work/variableSave/OCRpro1/temp/deepres.ckpy',
        des_save_dirname='/home/allen/work/variableSave/OCRpro1/mnist/mnistCompRes',
        save_dirname='/home/allen/work/variableSave/OCRpro1/temp/',
        peizhi_filename='/home/allen/work/chengxu/OCR/OCRpro1/mnistCompRes/peizhi.xml')

    ModelUtilv3s1.init_peizhi(peizhifilename=gps.peizhi_filename,
                              peizhidict=peizhi_dict)

    # Run description: logged now and later embedded in the result mail.
    msg = ("peizhi\nhps:" + str(hps)
           + "\ngps:" + str(gps)
           + "\n max pool"
           + "deepk and descrate can be fractional, use Relu, "
           + "\nimg random crop scale rotate")
    logger.showAndLogMsg(msg)

    while True:
        print("start training")
        # Train for 4 epochs between consecutive test passes.
        startTrain(4, hps, mode='train', gps=gps)
        print("training end")

        print("start test")
        startTest(hps, mode='test', gps=gps, msg=msg)
        print("test end")

        # startTest persists 'max_test_acc' into the config pickle.
        with open(gps.peizhi_filename, mode='rb') as rfobj:
            peizhi = pickle.load(rfobj)
        if peizhi['max_test_acc'] >= 0.999:
            print("already over best test acc, now test acc is ",
                  peizhi['max_test_acc'])
            break
Beispiel #5
0
    def create_graph(self):
        """Build the forward graph, softmax predictions and mean
        cross-entropy loss; in 'train' mode also create the SGD train op.

        Side effects: sets ``self.is_training_ph``, ``self.predictions``,
        ``self.loss`` and, when ``self.mode == "train"``,
        ``self.learning_rate`` and ``self.train_op``.
        """
        # create graph start
        self.is_training_ph = tf.placeholder(tf.bool)

        # Residual feature extractor, then flatten conv features and map
        # them to class logits with a single dense layer.
        resnet = self.create_resnet(self._images, self.is_training_ph, activateFunc=tf.nn.relu)
        fcl1_inputs, fcl1_in_features = ModelUtilv3s1.conv2fc(resnet)
        # set outputs features
        outputs_features = self.hps.num_classes
        outputs = tf.layers.dense(fcl1_inputs,outputs_features)

        self.predictions = tf.nn.softmax(logits=outputs)

        self.loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(
                logits=outputs, labels=self.labes))

        if self.mode == "train":
            self.learning_rate = tf.placeholder(tf.float32)
            self.train_op = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(loss=self.loss)
Beispiel #6
0
def startTest(hps, mode):
    """Evaluate the current DeepCNet checkpoint on the MNIST test set.

    Restores from the module-level ``save_file``, computes accuracy over
    all 10,000 test images, decays the learning rate via
    ``ModelUtilv3s1.down_learning_rate`` and persists the new rate (and,
    when improved, the best accuracy) into the config pickle.

    :param hps: HParams with batch and image sizes.
    :param mode: model mode string (expected 'test'); the ``mode='rb'``
        below is the file-open keyword argument, not this parameter.
    """
    tf.reset_default_graph()
    with open(peizhi_filename, mode='rb') as rfobj:
        peizhi = pickle.load(rfobj)
    lrn_rate = peizhi['lrn_rate']

    xp = tf.placeholder(tf.float32, [
        hps.batch_nums, hps.des_img_size, hps.des_img_size,
        hps.filter_in_channel
    ])
    yp = tf.placeholder(tf.float32, [None, 10])

    model = deepcnetModel.DeepCModel(hps, xp, yp, mode, peizhi['train_step'])
    model.create_graph()

    # Number of steps for one full pass over the 10,000 test images.
    epochTrainNums = int(10000 // hps.batch_nums)
    allrightnums = 0
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()

        saver.restore(sess=sess, save_path=save_file)

        for itstep in range(0, epochTrainNums):
            images, labels = mnist.test.next_batch(hps.batch_nums)
            images = batch_imgs_process(images, hps)

            feed_dict = {xp: images, yp: labels, model.is_training_ph: False}
            (inlabels,
             outprediction) = sess.run([model.labes, model.predictions],
                                       feed_dict=feed_dict)
            itrightnums = ModelUtilv3s1.get_test_right_num(
                outprediction, inlabels)
            allrightnums += itrightnums

        test_acc = float(allrightnums) / 10000
        # Decay the learning rate based on the new accuracy and persist it.
        lrn_rate = ModelUtilv3s1.down_learning_rate(test_acc, lrn_rate)
        ModelUtilv3s1.update_peizhi(peizhi_filename, 'lrn_rate', lrn_rate)

        if test_acc > peizhi['max_test_acc']:
            ModelUtilv3s1.update_peizhi(peizhi_filename, 'max_test_acc',
                                        test_acc)

        msg = "test acc:%.5f         now learning rate:%f" % (test_acc,
                                                              lrn_rate)

        logger.showAndLogMsg(msg)
Beispiel #7
0
    def create_deep_res_head(self, inputx, is_training_ph, activateFunc=tf.nn.relu):
        """Network head: a single BN convolution layer.

        NOTE(review): the original (Chinese) docstring described "one
        conv layer, one max pooling and a building-block serial", but
        the current code only applies one BN conv layer — the docstring
        was stale relative to the code.

        :param inputx: 4D input tensor; values in range (0, 1.0) or
            (0, 255) per the original note.
        :param is_training_ph: bool placeholder toggling BN train mode.
        :param activateFunc: activation function, default ``tf.nn.relu``.
        :return: the convolved feature map (original comment:
            64*64*deep_net_fkn).
        """



        # kernal_width = 5
        kernal_width = 3
        depth = self.hps.deep_net_fkn

        # conv_layer1 is 64*64*deep_net_fkn
        outputs = ModelUtilv3s1.add_BN_conv_layer(
            inputx, kernal_width,
            depth, is_training_ph, scope="reshead",
            activateFunc=activateFunc,stride=[1,1,1,1])


        return outputs
Beispiel #8
0
#coding=utf-8

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from mnistCompRes.mnisttool import ModelUtilv3s1

# Method 1: conv - BN - pool - activation, test-acc = 0.9649
# Method 2: conv - BN - activation - pool, test-acc = 0.9654
#create data
mnist = input_data.read_data_sets("../MNIST_DATA/", one_hot=True)

logger = ModelUtilv3s1.MyLog(
    '/home/allen/work/data/resultlog/lenet5/lenet5.txt')
logDir = '/home/allen/work/data/resultlog/lenet5/summary/trainBN_cor'

#set global features
img_size = 28   # MNIST image width/height
img_depth = 1   # single (grayscale) channel

global_steps = tf.Variable(0, trainable=False)

# tf.reset_default_graph()
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
train_nums = 10000   # presumably the number of training iterations — confirm
batch_size = 50

save_file = "/home/allen/work/variableSave/lenet5/lenet1.ckpt"
learning_rate = 1e-4

activate_func = tf.nn.relu
in_training = tf.placeholder(tf.bool)   # BN train/eval switch
Beispiel #9
0
def startTrain(hps, mode):
    """Train the DeepCNet model for ~10,000 steps, then checkpoint.

    Restores from ``save_file`` when the persisted config says so,
    mixes 9 training batches with 1 validation batch per 10 steps,
    logs accuracy and writes a TensorBoard summary every 100 steps,
    and finally saves the session and updates the config ("peizhi").

    Fix vs. original: at every 100th step the original ran
    ``model.train_op`` a *second* time on the same batch (once in the
    plain run, again while fetching the summary), applying a double
    gradient update. The summary is now fetched in the same
    ``sess.run`` as the training step.

    :param hps: HParams with batch and image sizes.
    :param mode: model mode string (expected 'train').
    """
    tf.reset_default_graph()
    with open(peizhi_filename, mode='rb') as rfobj:
        peizhi = pickle.load(rfobj)

    xp = tf.placeholder(
        tf.float32,
        [hps.batch_nums, hps.des_img_size, hps.des_img_size, hps.img_depth])
    yp = tf.placeholder(tf.float32, [None, 10])
    model = deepcnetModel.DeepCModel(hps, xp, yp, mode, peizhi['train_step'])
    model.create_graph()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()

        if not peizhi['is_restore']:
            sess.run(model.init)
        else:
            saver.restore(sess, save_file)

        base_step = sess.run(model.step)
        end_step = int(base_step + 10000 + 1)
        for itstep in range(base_step, end_step):
            # 9 of every 10 batches come from the training split, the
            # 10th from the validation split.
            if (itstep % 10) <= 8:
                images, labels = mnist.train.next_batch(hps.batch_nums)
            else:
                images, labels = mnist.validation.next_batch(hps.batch_nums)

            images = batch_imgs_process(images, hps)

            feed_dict = {
                xp: images,
                yp: labels,
                model.learning_rate: peizhi['lrn_rate'],
                model.is_training_ph: True
            }

            if itstep % 100 == 0:
                # Fetch the summary together with the (single) train op.
                (inlabels, outprediction, cost, _, summary) = sess.run(
                    [model.labes, model.predictions, model.loss,
                     model.train_op, model.merged],
                    feed_dict=feed_dict)
                trainacc = ModelUtilv3s1.get_accurate(outprediction, inlabels)
                msg = "trainstep:%5d  loss:%e  train acc:%.5f" % (itstep, cost,
                                                                  trainacc)

                train_writer.add_summary(summary, itstep)

                if itstep % 500 == 0:
                    logger.showAndLogMsg(msg)
                else:
                    logger.log_message(msg)
            else:
                sess.run(
                    [model.labes, model.predictions, model.loss,
                     model.train_op],
                    feed_dict=feed_dict)

        print("before save")

        saver.save(sess=sess, save_path=save_file)
        print("after save")
        ModelUtilv3s1.update_peizhi(peizhi_filename, 'is_restore', True)
        end_step = sess.run(model.step)
        ModelUtilv3s1.update_peizhi(peizhi_filename, 'train_step', end_step)
Beispiel #10
0
from collections import namedtuple
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import pickle

from mnistCompRes.mnisttool import imgUtil
from mnistCompRes.mnisttool import ModelUtilv3s1
from deepCNet.modelUtil import deepcnetModel

# Hyper-parameter record shared by the train/test entry points.
HParams = namedtuple(
    'HParams', 'batch_nums, num_classes, deep_net_fkn,'
    'img_depth, img_width, des_img_size, filter_in_channel')

logger = ModelUtilv3s1.MyLog(
    '/home/allen/work/data/resultlog/deepcNet/deepcnet20.txt')
logDir = '/home/allen/work/data/resultlog/deepcNet/summary/deepcs96f20'

mnist = input_data.read_data_sets("../MNIST_DATA/", one_hot=True)
save_file = "/home/allen/work/variableSave/deepcnet/deepcnet.ckpt"
# NOTE: despite the .xml extension, this file is read/written with pickle.
peizhi_filename = "/home/allen/work/chengxu/OCR/OCRpro1/deepCNet/peizhi.xml"

# Initial persisted-state ("peizhi") values: learning rate, whether a
# checkpoint should be restored, the step counter, and best accuracy.
peizhi_dict = {
    'lrn_rate': 1e-3,
    'is_restore': False,
    'train_step': 0,
    'max_test_acc': 0
}
train_writer = tf.summary.FileWriter(logDir)


def startTrain(trainepochnums, hps, mode, gps):
    """Train the MNIST model for ``trainepochnums`` epochs, then checkpoint.

    Restores from ``gps.save_file_name`` when the persisted config says
    so, mixes 9 training batches with 1 validation batch per 10 steps,
    logs accuracy every 100 steps, then saves the checkpoint and writes
    the new step count back into the config ("peizhi") pickle.

    Fix vs. original: a stray body-less duplicate header
    ``def startTrain(hps, mode):`` immediately preceding this definition
    (a paste artifact and a SyntaxError) was removed, along with two
    dangling namedtuple field strings left after the function body.

    :param trainepochnums: number of passes over the 50,000-image
        training set.
    :param hps: HParams with batch and image sizes.
    :param mode: model mode string (expected 'train').
    :param gps: GParams with checkpoint and config file paths.
    """
    # images,labels = mnist.train.next_batch(hps.batch_nums)
    tf.reset_default_graph()

    xp = tf.placeholder(
        tf.float32,
        [hps.batch_nums, hps.des_img_size, hps.des_img_size, hps.img_depth])
    yp = tf.placeholder(tf.float32, [None, 10])
    model = mnistModel.MnistModel(hps, xp, yp, mode)
    model.create_graph()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()
        with open(gps.peizhi_filename, mode='rb') as rfobj:
            peizhi = pickle.load(rfobj)

        if not peizhi['is_restore']:
            init = tf.global_variables_initializer()
            sess.run(init)
        else:
            saver.restore(sess, gps.save_file_name)

        base_step = int(peizhi['train_step'])
        # 50,000 training images per epoch.
        end_step = int(base_step + 50000 * trainepochnums / hps.batch_nums + 1)
        # end_step = int(base_step + 5000*trainepochnums / hps.batch_nums +1)
        for itstep in range(base_step, end_step):
            # 9 of every 10 batches from the training split, the 10th
            # from the validation split.
            if (itstep % 10) <= 8:
                images, labels = mnist.train.next_batch(hps.batch_nums)
            else:
                images, labels = mnist.validation.next_batch(hps.batch_nums)

            images = batch_imgs_process_train(images, hps)

            feed_dict = {
                xp: images,
                yp: labels,
                model.learning_rate: peizhi['lrn_rate'],
                model.is_training_ph: True
            }
            (inlabels, outprediction, cost, _) = sess.run(
                [model.labes, model.predictions, model.loss, model.train_op],
                feed_dict=feed_dict)

            if itstep % 100 == 0:
                trainacc = ModelUtilv3s1.get_accurate(outprediction, inlabels)
                msg = "trainstep:%5d  loss:%e  train acc:%.5f" % (itstep, cost,
                                                                  trainacc)

                if itstep % 200 == 0:
                    logger.showAndLogMsg(msg)
                else:
                    logger.log_message(msg)

            # if itstep % 7000 ==0 and itstep > 0:
            #     print "before save"
            #     saver.save(sess=sess, save_path=gps.save_file_name)
            #     print "after save"

        print("before save")
        saver.save(sess=sess, save_path=gps.save_file_name)
        print("after save")
        ModelUtilv3s1.update_peizhi(gps.peizhi_filename, 'is_restore', True)
        ModelUtilv3s1.update_peizhi(gps.peizhi_filename, 'train_step',
                                    end_step)

# File-path record shared by the train/test entry points.
GParams = namedtuple(
    'GParams',
    'save_file_name, des_save_dirname, save_dirname, peizhi_filename')

# Initial persisted-state ("peizhi") values: learning rate, whether a
# checkpoint should be restored, step counters, and best accuracy.
peizhi_dict = {
    'lrn_rate': 1e-2,
    'is_restore': False,
    'train_step': 0,
    'test_step': 0,
    'max_test_acc': 0
}

logger = ModelUtilv3s1.MyLog(
    '/home/allen/work/data/resultlog/mnistCompRes/mnistCmpResn12.txt')
mnist = input_data.read_data_sets("../MNIST_DATA/", one_hot=True)


def startTrain(trainepochnums, hps, mode, gps):
    # images,labels = mnist.train.next_batch(hps.batch_nums)
    tf.reset_default_graph()

    xp = tf.placeholder(
        tf.float32,
        [hps.batch_nums, hps.des_img_size, hps.des_img_size, hps.img_depth])
    yp = tf.placeholder(tf.float32, [None, 10])
    model = mnistModel.MnistModel(hps, xp, yp, mode)
    model.create_graph()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess: