Пример #1
0
class Test:
    """Restore a trained CNN checkpoint and evaluate it on the test split.

    Configuration is read from a fixed YAML path; evaluation is kicked off
    immediately from ``__init__`` via ``start()``.
    """

    def __init__(self):
        # Load the CNN configuration from its fixed location.
        conf_path_cnn = '/src/cnn/conf/cnn.yaml'
        with open(conf_path_cnn, 'r') as fd:
            self.conf = yaml.safe_load(fd)

        self.batch_size = self.conf['testing']['batch_size']
        self.checkpoint_path = self.conf['misc']['checkpoint_path']

        self.logger = Logger
        self.preProcessor = PreProcessor(Logger)
        # testing=True: build the graph in inference mode.
        self.cnn = Cnn(Logger, conf_path_cnn, testing=True)
        self.sess = tf.compat.v1.Session()

        self.start()

    def start(self):
        """Run the restored model over the test data.

        Returns:
            The model predictions for the test set (``pred_y`` as produced
            by ``Cnn.analyze_epoch``).
        """
        test_x, test_y = self.preProcessor.get_data(testing=True)

        self.logger.info('testing...')

        # Restore the model into the session created in __init__.
        # NOTE: the original code created a *second* Session here and
        # rebound self.sess, leaking the first session's resources.
        saver = tf.compat.v1.train.Saver()
        model_path = self.conf['testing']['model']
        saver.restore(self.sess, model_path)
        self.logger.info('{0} restored'.format(model_path))

        # Evaluate; close the session even if evaluation raises.
        self.sess.run(tf.compat.v1.local_variables_initializer())
        try:
            pred_y, loss, acc = self.cnn.analyze_epoch(self.sess, test_x,
                                                       test_y)
            self.logger.info('test_loss: {0}, test_acc: {1}'.format(loss, acc))
        finally:
            self.sess.close()
        return pred_y
Пример #2
0
class Train:
    """Train the CNN from the YAML configuration and save a checkpoint.

    Configuration is read from a fixed YAML path; training is kicked off
    immediately from ``__init__`` via ``start()``.
    """

    def __init__(self):
        # Load the CNN configuration from its fixed location.
        conf_path_cnn = '/src/cnn/conf/cnn.yaml'
        with open(conf_path_cnn, 'r') as fd:
            self.conf = yaml.safe_load(fd)

        # load configuration
        self.num_epochs = self.conf['training']['epochs']
        self.batch_size = self.conf['training']['batch_size']
        self.checkpoint_path = self.conf['misc']['checkpoint_path']

        self.logger = Logger
        self.preProcessor = PreProcessor(Logger)
        self.cnn = Cnn(Logger, conf_path_cnn)
        self.sess = tf.compat.v1.Session()

        self.start()

    def start(self):
        """Run the full training loop, log per-epoch metrics, save the model.

        The session is closed in all cases, including when training raises.
        """
        train_x, train_y, val_x, val_y = self.preProcessor.get_data()

        self.sess.run(tf.compat.v1.global_variables_initializer())
        self.sess.run(tf.compat.v1.local_variables_initializer())

        saver = tf.compat.v1.train.Saver()
        # Floor division; any incomplete trailing batch is dropped
        # (same behavior as the original int(len/batch_size)).
        num_iterations = len(train_x) // self.batch_size

        self.logger.info('training...')
        try:
            for epoch in range(self.num_epochs):

                for iter_ in range(num_iterations):
                    batch_x = train_x[iter_ * self.batch_size:(iter_ + 1) *
                                      self.batch_size, :]
                    batch_y = train_y[iter_ * self.batch_size:(iter_ + 1) *
                                      self.batch_size, :]

                    self.sess.run(self.cnn.training,
                                  feed_dict={
                                      self.cnn.layer_input: batch_x,
                                      self.cnn.ground_truth: batch_y
                                  })

                # train loss
                _, loss_train, acc_train = self.cnn.analyze_epoch(
                    self.sess, train_x, train_y)

                # val loss
                _, loss_val, acc_val = self.cnn.analyze_epoch(
                    self.sess, val_x, val_y)

                self.logger.info(
                    'epoch: {0}/{1}, loss_train: {2}, acc_train: {3}, loss_val: {4}, acc_val: {5}'
                    .format(epoch + 1, self.num_epochs, loss_train, acc_train,
                            loss_val, acc_val))

            # save model
            saved_path = saver.save(
                self.sess, '{0}model.ckpt'.format(self.checkpoint_path))
            self.logger.info('model saved in {0}'.format(saved_path))
        finally:
            # Release the session even if training or saving raises;
            # the original leaked it on any exception.
            self.sess.close()