Example #1
def inference(self, image):
    print("Loading Network...")
    model = CapsNet()
    weights_dir = self.OBJ_RECOG_ROOT + '/weights'
    # managed_session handles variable initialization and recovery, so no
    # separate tf.Session / global_variables_initializer is needed.
    supervisor = tf.train.Supervisor(graph=model.graph,
                                     logdir=weights_dir,
                                     save_model_secs=0)
    with supervisor.managed_session(config=tf.ConfigProto(
            allow_soft_placement=True)) as sess:
        supervisor.saver.restore(
            sess, tf.train.latest_checkpoint(weights_dir))
        pred = model.inference()
        logits_value = sess.run(
            [pred], feed_dict={model.image_placeholder: image})
    pred_result = logits_value[0][0]
    obj_num = int(np.argmax(pred_result))
    print('object is', self.obj_list[obj_num])
    return obj_num
Example #2
def main():
    trX, trY, num_tr_batch, valX, valY, num_val_batch = load_data('mnist', batch_size, is_training=True)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    model = CapsNet()
    sv = tf.train.Supervisor(graph=model.graph, logdir=logdir, save_model_secs=0)
    with sv.managed_session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        for epoch in range(num_epochs):
            print('Ep %d/%d' % (epoch, num_epochs))
            if sv.should_stop():
                print('Should stop.')
                break
            for step in range(num_tr_batch):
                global_step = epoch * num_tr_batch + step
                # model.X, model.Y, _, _ = data.train.next_batch(batch_size)
                if global_step % 10 == 0:
                    _, loss, acc, summary, margin_loss = sess.run(
                        [model.train_op, model.total_loss, model.accuracy, model.train_summary, model.margin_loss])
                    print('Step %d: loss %f \t acc %f \t margin loss %f' % (global_step, loss, acc, margin_loss))
                    # Intermediate tensors (model.v_length, model.digit_caps,
                    # model.primary_caps, model.conv1, ...) can be fetched
                    # with sess.run here for debugging.

                    sv.summary_writer.add_summary(summary, global_step)
                else:
                    sess.run(model.train_op)
Example #3
def main(_):
    if cfg.is_training:
        prepare_output_dir()

    graph = tf.Graph()
    with graph.as_default():
        tf.set_random_seed(1)
        model = CapsNet()
        saver = tf.train.Saver()

        if cfg.is_training:
            session = tf.train.MonitoredTrainingSession(hooks=[
                tf.train.NanTensorHook(model.total_loss),
                tf.train.CheckpointSaverHook(
                    checkpoint_dir=cfg.checkpoint_dir,
                    save_steps=cfg.save_checkpoint_steps,
                    saver=saver),
                tf.train.SummarySaverHook(save_steps=cfg.train_sum_freq,
                                          output_dir=LOG_DIR,
                                          summary_op=model.train_summary)
            ])
            train(model, session)
        else:
            session = tf.train.MonitoredSession()
            evaluation(model, session, saver)
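
The train helper called above is not shown in this snippet. A minimal sketch of what it could look like, assuming the model exposes train_op as in the other examples on this page:

def train(model, session):
    # The hooks attached above do the bookkeeping: NanTensorHook aborts on a
    # NaN loss, while CheckpointSaverHook and SummarySaverHook fire on their
    # configured step intervals. The loop only has to step until a hook
    # requests a stop.
    with session:
        while not session.should_stop():
            session.run(model.train_op)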
Example #4
def main(_):
    capsNet = CapsNet(is_training=cfg.is_training)
    tf.logging.info('Graph loaded')
    sv = tf.train.Supervisor(graph=capsNet.graph,
                             logdir=cfg.logdir,
                             save_model_secs=0)

    path = cfg.results + '/accuracy.csv'
    if not os.path.exists(cfg.results):
        os.mkdir(cfg.results)
    elif os.path.exists(path):
        os.remove(path)

    fd_results = open(path, 'w')
    fd_results.write('step,test_acc\n')
    with sv.managed_session() as sess:
        num_batch = 60000 // cfg.batch_size
        num_test_batch = 10000 // cfg.batch_size
        teX, teY = load_mnist(cfg.dataset, False)
        for epoch in range(cfg.epoch):
            if sv.should_stop():
                break
            for step in tqdm(range(num_batch),
                             total=num_batch,
                             ncols=70,
                             leave=False,
                             unit='b'):
                global_step = sess.run(capsNet.global_step)

                if step % cfg.train_sum_freq == 0:
                    # Fetch the summary together with the training op so the
                    # batch is not trained on twice on summary steps.
                    _, summary_str = sess.run(
                        [capsNet.train_op, capsNet.train_summary])
                    sv.summary_writer.add_summary(summary_str, global_step)
                else:
                    sess.run(capsNet.train_op)

                if (global_step + 1) % cfg.test_sum_freq == 0:
                    test_acc = 0
                    for i in range(num_test_batch):
                        start = i * cfg.batch_size
                        end = start + cfg.batch_size
                        test_acc += sess.run(
                            capsNet.batch_accuracy, {
                                capsNet.X: teX[start:end],
                                capsNet.labels: teY[start:end]
                            })
                    test_acc = test_acc / (cfg.batch_size * num_test_batch)
                    fd_results.write(
                        str(global_step + 1) + ',' + str(test_acc) + '\n')
                    fd_results.flush()
                    summary_str = sess.run(capsNet.test_summary,
                                           {capsNet.test_acc: test_acc})
                    sv.summary_writer.add_summary(summary_str, global_step)

            if epoch % cfg.save_freq == 0:
                sv.saver.save(
                    sess, cfg.logdir + '/model_epoch_%04d_step_%02d' %
                    (epoch, global_step))

    fd_results.close()
    tf.logging.info('Training done')
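
Note the normalization by cfg.batch_size * num_test_batch above: it implies batch_accuracy returns a per-batch count (or sum) of correct predictions rather than a mean. A sketch of such an op (the function and tensor names are assumptions):

def batch_accuracy_op(logits, labels):
    # Count, rather than average, the correct predictions in the batch, so
    # that summing over batches and dividing by the total number of test
    # samples yields overall accuracy -- matching the normalization above.
    correct = tf.equal(tf.argmax(logits, axis=1), tf.cast(labels, tf.int64))
    return tf.reduce_sum(tf.cast(correct, tf.float32))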
Example #5
    def __init__(self,
                 batch_size,
                 use_recons_loss,
                 recon_with_y,
                 model_path="log"):
        self.batch_size = batch_size
        self.type_name = "with_y" if recon_with_y else "no_y"
        self.model_path = os.path.join(model_path, self.type_name)

        # data
        self.num_train = 60000 // self.batch_size
        self.num_test = 10000 // self.batch_size
        self.test_x, self.test_y = load_mnist(is_training=False)

        # net
        self.capsNet = CapsNet(batch_size=self.batch_size,
                               use_recons_loss=use_recons_loss,
                               recon_with_y=recon_with_y)

        # A training helper that checkpoints models and computes summaries.
        self.sv = tf.train.Supervisor(graph=self.capsNet.graph,
                                      logdir=self.model_path,
                                      save_model_secs=0)

        # config
        self.config = tf.ConfigProto()
        self.config.gpu_options.allow_growth = True
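
The snippet only shows the constructor; a minimal sketch of a companion train method using the Supervisor and config set up above (the loop body is an assumption, modeled on the other examples here):

    def train(self, num_epochs):
        # num_epochs is a hypothetical parameter; the net is assumed to
        # expose a train_op, as in the other CapsNet examples on this page.
        with self.sv.managed_session(config=self.config) as sess:
            for epoch in range(num_epochs):
                if self.sv.should_stop():
                    break
                for step in range(self.num_train):
                    sess.run(self.capsNet.train_op)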
Example #6
def predict_picture(pic_path, model_path, dataset, device):
    if dataset == 'char74k':
        size = 20
        num_classes = 26
    else:
        size = 28
        num_classes = 10
    transform = transforms.Compose([
        transforms.Resize(size),  # transforms.Scale is the deprecated name
        transforms.CenterCrop((size, size)),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    img = Image.open(pic_path)
    img_tensor = transform(img).unsqueeze(0).to(device)

    model = CapsNet(num_classes=num_classes, conv_in=3)
    model = model.to(device)
    state_dict = torch.load(model_path, map_location=device)
    model.load_state_dict(state_dict)
    model.eval()  # switch to inference mode once the weights are loaded

    with torch.no_grad():
        score = model(img_tensor)
    probs = nn.functional.softmax(score, dim=-1)
    max_value, index = torch.max(probs, dim=-1)
    print('The picture {} is: {}'.format(pic_path.split('/')[-1],
                                         char74k_id2labels[int(index)]))
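
A hypothetical invocation, with made-up paths:

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
predict_picture('samples/letter_a.png', 'weights/char74k_capsnet.pth',
                'char74k', device)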
Example #7
def main(_):
    data = d.load_data(cfg)
    model = CapsNet(data.image_axis_size)
    n_epochs = cfg.epochs
    n_iterations_per_epoch = data.train.num_examples // cfg.batch_size
    n_iterations_validation = data.validation.num_examples // cfg.batch_size
    if cfg.is_training:
        train(True, n_epochs, n_iterations_per_epoch, n_iterations_validation,
              data, model)
    else:
        validate(data, model)
Example #8
def main(_):
    num_label = 10
    model = CapsNet()

    sv = tf.train.Supervisor(graph=model.graph,
                             logdir=cfg.logdir,
                             save_model_secs=0)

    if cfg.is_training:
        train(model, sv, num_label)
    else:
        evaluation(model, sv, num_label)
Example #9
def main(_):
    tf.logging.info(' Loading Graph...')
    num_label = 10
    model = CapsNet()
    tf.logging.info(' Graph loaded')

    sv = tf.train.Supervisor(graph=model.graph, logdir=cfg.logdir, save_model_secs=0)

    if cfg.is_training:
        tf.logging.info(' Start training...')
        train(model, sv, num_label)
        tf.logging.info('Training done')
    else:
        evaluation(model, sv, num_label)
Example #10
def main(_):
    tf.logging.info(' Loading Graph...')
    num_label = 10
    model = CapsNet()
    tf.logging.info(' Graph loaded')
    
    sv = tf.train.Supervisor(graph=model.graph, logdir=cfg.logdir, save_model_secs=0)
    # The above is deprecated; replace it with MonitoredTrainingSession - Sudeep
    # with model.graph.as_default():
    #     with tf.train.MonitoredTrainingSession(summary_dir=cfg.logdir, save_summaries_secs=0) as sv:
    
    if cfg.is_training:
        tf.logging.info(' Start training...')
        train(model, sv, num_label)
        tf.logging.info('Training done')
    else:
        evaluation(model, sv, num_label)
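
For reference, a sketch of the MonitoredTrainingSession replacement hinted at in the comment above, assuming train and evaluation can be adapted to take a raw session instead of a Supervisor:

with model.graph.as_default():
    # MonitoredTrainingSession restores from / checkpoints to checkpoint_dir
    # and installs its own summary and step-counter hooks.
    with tf.train.MonitoredTrainingSession(checkpoint_dir=cfg.logdir) as sess:
        if cfg.is_training:
            train(model, sess, num_label)
        else:
            evaluation(model, sess, num_label)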
Example #11
def main(_):
    tf.logging.info(' Loading the CapsNet graph...')
    num_label = 10
    model = CapsNet()
    tf.logging.info(' Graph loaded')

    sv = tf.train.Supervisor(graph=model.graph,
                             logdir=cfg.logdir,
                             save_model_secs=0)

    if cfg.is_training:
        tf.logging.info(' Start training...')
        train(model, sv, num_label)
        tf.logging.info('Training done')
    else:
        evaluation(model, sv, num_label)

    return 0
Example #12
def main(_):
    tf.logging.info(' Loading Graph...')
    num_label = 10
    model = CapsNet(height=cfg.sample_height,
                    width=cfg.sample_width,
                    channels=cfg.sample_channel,
                    num_label=cfg.amount_label)
    tf.logging.info(' Graph loaded')

    sv = tf.train.Supervisor(graph=model.graph,
                             logdir=cfg.logdir,
                             save_model_secs=0)

    if cfg.is_training:
        tf.logging.info(' Start training...')
        train(model, sv, num_label)
        tf.logging.info('Training done')
    else:
        evaluation(model, sv, num_label)
Example #13
def main(_):
    tf.logging.info('Loading Graph...')
    # model = CapsNet()
    tf.logging.info('Graph loaded')

    # if cfg.is_training:
    #     tf.logging.info('Start training...')
    #     train(model, sv, model.tr_batch_num)
    #     tf.logging.info('Training done')
    # else:
    #     if not cfg.predict:
    #         evaluation(model, sv)
    #     else:
    #         return prediction(model, sv)

    for i in range(8):
        model = CapsNet(i + 1)
        sv = tf.train.Supervisor(graph=model.graph,
                                 logdir=cfg.logdir,
                                 save_model_secs=0)
        train(model, sv, model.tr_batch_num)
        evaluation(model, sv, i + 1)
Example #14
def eval(model_path, result_path, dataProcessor, num_classes, epoch, device,
         eval_type, logger):
    capsNet = CapsNet(num_classes=num_classes, conv_in=3)
    state_dict = torch.load(model_path + '/model_state_dict.pkl')
    capsNet.load_state_dict(state_dict)
    capsNet.to(device)
    valid_scores, valid_predicts, valid_labels = predict(
        capsNet, dataProcessor, num_classes, epoch, device, 'valid', logger)
    test_scores, test_predicts, test_labels = predict(capsNet, dataProcessor,
                                                      num_classes, epoch,
                                                      device, 'test', logger)
    store_results(test_predicts, test_labels, result_path)
    with open(result_path + '/result.csv', 'w') as file:
        file.write('valid scores,test scores\n')
        file.write(str(valid_scores) + ',' + str(test_scores))

    return valid_scores, test_scores
Example #15
def main(_):

    # quiet TensorFlow's INFO/WARNING console output
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
    # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # os.environ['CUDA_VISIBLE_DEVICES'] = "0"
    # os.environ['CUDA_VISIBLE_DEVICES'] = ""
    tf.reset_default_graph()
    tf.logging.info(' Loading Graph...')
    num_label = 52
    model = CapsNet()
    tf.logging.info(' Graph loaded')
    # sv = tf.train.MonitoredTrainingSession(model.graph, checkpoint_dir=cfg.logdir, save_checkpoint_secs=3)
    sv = tf.train.Supervisor(graph=model.graph,
                             logdir=cfg.logdir,
                             save_model_secs=0)
    if cfg.is_training:
        tf.logging.info(' Start training...')
        train(model, sv, num_label)
        tf.logging.info('Training done')
    else:
        evaluation(model, sv, num_label)
Example #16
def main(_):

    # The MultiMNIST set can be created and eyeballed first:
    # create_multimnist(is_training=cfg.is_training)
    # trX, trY, num_tr_batch, valX, valY, num_val_batch = load_multimnist(is_training=cfg.is_training)
    # (then sample random indices and plt.imshow a few examples)

    # The number of labels for the data.
    num_label = 10

    # Generate the model.
    tf.logging.info('Loading Graph...')
    if cfg.use_cnn:
        model = cnn()
    else:
        model = CapsNet()

    tf.logging.info('Graph loaded')

    # Generate the supervisor.
    sv = tf.train.Supervisor(graph=model.graph, logdir=cfg.logdir, save_model_secs=0)
    
    # Run training or evaluation.
    if cfg.is_training:
        tf.logging.info('Start training')
        train(model, sv, num_label)
        tf.logging.info('Training done')
    else:
        tf.logging.info('Start evaluation')
        evaluation(model, sv, num_label)
        tf.logging.info('Evaluation done')
Example #17
'''
Set up variables
'''
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
train_sum_freq = 10
val_sum_freq = 50

'''
Set up model
'''
# To make it distributed
device, target = device_and_target()  # getting node environment
with tf.device(device):
    global_step1 = tf.train.get_or_create_global_step()
    model = CapsNet(batch=FLAGS.batch_size, mnist=FLAGS.use_mnist,
                    data_path=FLAGS.path_to_data, global_step=global_step1)
step1 = tf.assign_add(global_step1, 1)
'''
Load the data
'''
trX, trY, num_tr_batch, valX, valY, num_val_batch = load_mnist(FLAGS.batch_size, is_training=True, 
                                                               path=FLAGS.path_to_data, mnist=FLAGS.use_mnist)

# Format Y
Y = valY[:num_val_batch * FLAGS.batch_size].reshape((-1, 1))

'''
Run the Model

Pass in target to determine the worker
'''
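
The 'Run the Model' section itself is not included in the snippet. A minimal sketch of what a between-graph distributed run could look like, using the target returned by device_and_target(); FLAGS.task_index, FLAGS.log_dir, model.train_op and the stopping step are all assumptions:

hooks = [tf.train.StopAtStepHook(last_step=num_tr_batch * 50)]
with tf.train.MonitoredTrainingSession(master=target,
                                       is_chief=(FLAGS.task_index == 0),
                                       checkpoint_dir=FLAGS.log_dir,
                                       hooks=hooks,
                                       config=config) as sess:
    while not sess.should_stop():
        # step1 (defined above) increments the shared global step
        _, step = sess.run([model.train_op, step1])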
Example #18
import tensorflow as tf

from config import cfg
from capsNet import CapsNet

if __name__ == "__main__":
    capsNet = CapsNet(is_training=cfg.is_training)
    tf.logging.info('Graph loaded')
    sv = tf.train.Supervisor(graph=capsNet.graph,
                             logdir=cfg.logdir,
                             save_model_secs=0)

    with sv.managed_session() as sess:
        num_batch = 60000 // cfg.batch_size
        for epoch in range(cfg.epoch):
            if sv.should_stop():
                break
            for step in range(num_batch):
                sess.run(capsNet.train_op)

            global_step = sess.run(capsNet.global_step)
            sv.saver.save(
                sess, cfg.logdir + '/model_epoch_%04d_step_%02d' %
                (epoch, global_step))

    tf.logging.info('Training done')
Example #19
import tensorflow as tf
import numpy as np
import utils
from capsNet import CapsNet

if __name__ == "__main__":
    vec, label, map_label = utils.get_vector('train', 12, None)
    vec_test, label_test, _ = utils.get_vector('test', 12, map_label)
    # np.float was removed from NumPy; cast straight to float32
    vec = np.array(vec).astype(np.float32).reshape(-1, 12, 25, 1)
    vec_test = np.array(vec_test).astype(np.float32).reshape(-1, 12, 25, 1)

    caps_model = CapsNet()
    earlyStopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     patience=8)
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                     factor=0.2,
                                                     patience=3,
                                                     min_lr=1e-5)
    #tensorBoard = tf.keras.callbacks.TensorBoard(log_dir='logs')
    caps_model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
                       loss=caps_model.margin_loss,
                       metrics=['accuracy'])
    caps_model.fit(vec,
                   label,
                   batch_size=32,
                   epochs=50,
                   validation_data=(vec_test, label_test),
                   callbacks=[earlyStopping, reduce_lr])
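
caps_model.margin_loss itself is not shown. The canonical capsule-network margin loss from Sabour et al. (2017) has this form; the actual method on this model may differ:

def margin_loss(y_true, y_pred, m_plus=0.9, m_minus=0.1, lam=0.5):
    # y_pred holds the per-class capsule lengths, y_true the one-hot labels:
    # L_k = T_k * max(0, m+ - ||v_k||)^2
    #       + lam * (1 - T_k) * max(0, ||v_k|| - m-)^2
    present = y_true * tf.square(tf.maximum(0.0, m_plus - y_pred))
    absent = lam * (1.0 - y_true) * tf.square(tf.maximum(0.0, y_pred - m_minus))
    return tf.reduce_mean(tf.reduce_sum(present + absent, axis=-1))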
Example #20
epochs = cfg.epochs
batch_size = cfg.batch_size  # assumed: read from cfg like the settings above
dataset = cfg.dataset
if dataset == 'MNIST':
    num_classes = 10
else:
    num_classes = 26

dataProcessor = DataProcessor(batch_size=batch_size,
                              dataset=dataset,
                              data_path='data/char74k_preprocessed')
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

capsNet = CapsNet(conv_in=3, num_classes=num_classes).to(device)
cseloss = torch.nn.CrossEntropyLoss()
adamOptimizer = torch.optim.Adam(params=capsNet.parameters())
best_val_acc = -1
best_val_epoch = 0
for epoch in tqdm(range(epochs)):
    epoch_loss = 0
    for batch_id, (data, target) in enumerate(dataProcessor.train_loader):
        capsNet.train()
        target = torch.eye(num_classes).index_select(dim=0, index=target)

        data = data.to(device)
        target = target.to(device)

        adamOptimizer.zero_grad()
        # output, reconstructions, masked = capsNet(data)
        # Sketch of the usual completion (assumption: a single-score forward
        # pass, as in the other snippets from this repo; CrossEntropyLoss
        # accepts one-hot targets in recent PyTorch):
        score = capsNet(data)
        loss = cseloss(score, target)
        loss.backward()
        adamOptimizer.step()
        epoch_loss += loss.item()
Example #21
def main(_):
    capsNet = CapsNet(is_training=cfg.is_training)
    tf.logging.info('Graph loaded')
    sv = tf.train.Supervisor(graph=capsNet.graph,
                             logdir=cfg.logdir,
                             save_model_secs=0,
                             summary_op=None)

    path = cfg.results + '/accuracy.csv'
    if not os.path.exists(cfg.results):
        os.mkdir(cfg.results)
    elif os.path.exists(path):
        os.remove(path)

    fd_results = open(path, 'w')
    fd_results.write('step,test_acc\n')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    ex_imgs, ex_imgs_label, imgs_scale, imgs_rotated, imgs_label = load_mnist_affNIST(
        True)
    teX, teY, teCtrl = load_mnist_affNIST(False)
    num_batches = ex_imgs.shape[0] + imgs_rotated.shape[0] + imgs_scale.shape[0]

    #     print("A")
    with sv.managed_session(config=config) as sess:
        num_test_batch = teX.shape[0] // cfg.batch_size

        for epoch in range(cfg.epoch):

            # Recreate/reshuffle batches for each epoch
            var_seq = np.random.choice(
                range(6), size=num_batches,
                replace=True)  # draw from 6 values for a 1:1:4 mix
            imgs_rotated_select = np.random.choice(range(imgs_rotated.shape[0]),
                                                   size=sum(var_seq == 0),
                                                   replace=True)
            imgs_scale_select = np.random.choice(range(imgs_scale.shape[0]),
                                                 size=sum(var_seq == 1),
                                                 replace=True)
            ex_imgs_select = np.random.choice(range(ex_imgs.shape[0]),
                                              size=sum(var_seq >= cfg.num_ex_var),
                                              replace=True)
            var_seq = np.array(sorted(var_seq))

            epoch_batches_imgs = np.concatenate(
                [imgs_rotated[imgs_rotated_select],
                 imgs_scale[imgs_scale_select],
                 ex_imgs[ex_imgs_select]], axis=0)
            epoch_batches_imgs = epoch_batches_imgs.reshape(
                [num_batches, cfg.batch_size, cfg.img_dim, cfg.img_dim, 1])

            epoch_batches_labels = np.concatenate(
                [imgs_label[imgs_rotated_select],
                 imgs_label[imgs_scale_select],
                 ex_imgs_label[ex_imgs_select]], axis=0)

            shuffle_ind = np.random.choice(range(num_batches),
                                           size=num_batches,
                                           replace=False)
            var_seq = var_seq[shuffle_ind]
            epoch_batches_imgs = epoch_batches_imgs[shuffle_ind]
            epoch_batches_labels = epoch_batches_labels[shuffle_ind]

            if sv.should_stop():
                break
            for step in tqdm(range(num_batches),
                             total=num_batches,
                             ncols=70,
                             leave=False,
                             unit='b'):

                feed = {
                    capsNet.X: epoch_batches_imgs[step],
                    capsNet.labels: epoch_batches_labels[step],
                    capsNet.train_var: var_seq[step]
                }
                if step % cfg.train_sum_freq == 0:
                    # Fetch the summary together with the training op so the
                    # batch is not trained on twice on summary steps.
                    _, global_step, summary_str = sess.run(
                        [capsNet.train_op, capsNet.global_step,
                         capsNet.train_summary], feed_dict=feed)
                    sv.summary_writer.add_summary(summary_str, global_step)
                else:
                    _, global_step = sess.run(
                        [capsNet.train_op, capsNet.global_step],
                        feed_dict=feed)

                if (global_step + 1) % cfg.test_sum_freq == 0:
                    test_acc = 0
                    for i in range(num_test_batch):
                        start = i * cfg.batch_size
                        end = start + cfg.batch_size
                        test_acc += sess.run(
                            capsNet.batch_accuracy, {
                                capsNet.X: teX[start:end],
                                capsNet.labels: teY[start:end]
                            })
                    test_acc = test_acc / (cfg.batch_size * num_test_batch)
                    fd_results.write(
                        str(global_step + 1) + ',' + str(test_acc) + '\n')
                    fd_results.flush()

                if global_step % cfg.save_freq == 0:
                    sv.saver.save(
                        sess, cfg.logdir + '/model_epoch_%04d_step_%02d' %
                        (epoch, global_step))

        sv.saver.save(
            sess, cfg.logdir + '/Final_model_epoch_%02d_step_%04d' %
            (epoch, global_step))

    fd_results.close()
    tf.logging.info('Training done')
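
About the '1:1:4 mix' comment above: var_seq is drawn uniformly from {0, ..., 5}; value 0 selects a rotated batch, value 1 a scaled batch, and values >= cfg.num_ex_var (presumably 2) an unmodified batch, giving expected proportions 1/6 : 1/6 : 4/6. A quick check:

import numpy as np

var_seq = np.random.choice(range(6), size=600000)
print((var_seq == 0).mean(), (var_seq == 1).mean(), (var_seq >= 2).mean())
# ~0.167  ~0.167  ~0.667  ->  a 1:1:4 ratio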
Example #22
def train(do_k_fold, out_dir, log_f):
    if do_k_fold:
        utils.print_out("# do k_fold k=%d" % k_fold, log_f)
        k_fold_val = 0
        k_fold_tra = 0
        [k_TP, k_TN, k_FP, k_FN, k_SE, k_SP, k_MCC, k_ACC] = [0, 0, 0, 0, 0, 0, 0, 0]
        for i in range(k_fold):
            trX_ = []
            trY_ = []
            for j in range(k_fold):
                if j == i: continue
                trX_.append(k_fold_X[j])
                trY_.append(k_fold_Y[j])
            trX_ = np.concatenate(trX_)
            trY_ = np.concatenate(trY_)
            utils.print_out("#k_fold %d" % i, log_f)
            utils.print_out("#do DBN ...", log_f)
            dbn = DBN()
            dbn.train(trX_)
            utils.print_out("#end DBN", log_f)
            utils.print_out("#do caps ...", log_f)
            capsNet = CapsNet(is_training=True, dbn=dbn)

            i_k_fold_val, i_k_fold_tra = capsNet.train(trX_, trY_, k_fold_X[i], k_fold_Y[i], None, log_f)
            TP, TN, FP, FN, SE, SP, MCC, ACC = eva(capsNet, k_fold_X[i], k_fold_Y[i])
            print(i,", TP:", TP)
            print(i,", TN:", TN)
            print(i,", FP:", FP)
            print(i,", FN:", FN)
            print(i,", SE:", SE)
            print(i,", SP:", SP)
            print(i,", MCC:", MCC)
            print(i,", ACC: ", ACC)
            k_TP += TP
            k_TN += TN
            k_FP += FP
            k_FN += FN
            k_SE += SE
            k_SP += SP
            k_MCC += MCC
            k_ACC += ACC

        print("TP :", k_TP / 5)
        print("TN :", k_TN / 5)
        print("FP :", k_FP / 5)
        print("FN :", k_FN / 5)
        print("SE :", k_SE / 5)
        print("SP :", k_SP / 5)
        print("MCC: ", k_MCC / 5)
        print("ACC: ", k_ACC / 5)
    else:
        utils.print_out("#do DBN ...", log_f)
        dbn = DBN()
        dbn.train(trX)
        utils.print_out("#end DBN", log_f)
        utils.print_out("#do caps ...", log_f)
        utils.print_out("#test instead val set for test ...", log_f)
        capsNet = CapsNet(is_training=isTraining, dbn=dbn)
        if isTraining:
            i_k_fold_val, i_k_fold_tra = capsNet.train(trX, trY, teX, teY, "./board", log_f)
            utils.print_out("#end caps", log_f)

            tr_TP, tr_TN, tr_FP, tr_FN, tr_SE, tr_SP, tr_MCC, tr_ACC = eva(capsNet, trX, trY)
            val_TP, val_TN, val_FP, val_FN, val_SE, val_SP, val_MCC, val_ACC = eva(capsNet, vaX, vaY)
            te_TP, te_TN, te_FP, te_FN, te_SE, te_SP, te_MCC, te_ACC = eva(capsNet, teX, teY)
            utils.print_out('train: TP:%.3f; TN:%.3f; FP:%.3f; FN:%.3f; SE:%.3f; SP:%.3f; MCC:%.3f; ACC:%.3f'
                            % (tr_TP, tr_TN, tr_FP, tr_FN, tr_SE, tr_SP, tr_MCC, tr_ACC), log_f)
            utils.print_out('val  : TP:%.3f; TN:%.3f; FP:%.3f; FN:%.3f; SE:%.3f; SP:%.3f; MCC:%.3f; ACC:%.3f'
                            % (val_TP, val_TN, val_FP, val_FN, val_SE, val_SP, val_MCC, val_ACC), log_f)
            utils.print_out('test : TP:%.3f; TN:%.3f; FP:%.3f; FN:%.3f; SE:%.3f; SP:%.3f; MCC:%.3f; ACC:%.3f'
                            % (te_TP, te_TN, te_FP, te_FN, te_SE, te_SP, te_MCC, te_ACC), log_f)

        else:
            import csv
            csvFile = open("./"+train_datadir+"/"+setFileNames[1], "r")
            reader = csv.reader(csvFile)  # 返回的是迭代类型
            data = []
            for item in reader:
                data.append(item[0])
            csvFile.close()
            data = data[1:]

            utils.print_out("#end caps", log_f)
            pre_Y = pre(capsNet, vaX).tolist()[0]
            import pandas as pd

            dataFrame = pd.DataFrame({"0_name": data, "1_class": pre_Y})
            dataFrame.to_csv('./data_set/test_dir/180831-result.csv', index=False, sep=",")
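
eva() is not shown; its return values follow the standard confusion-matrix definitions. A sketch of the arithmetic (not the source implementation):

import math

def confusion_metrics(TP, TN, FP, FN):
    SE = TP / (TP + FN)                    # sensitivity (recall)
    SP = TN / (TN + FP)                    # specificity
    ACC = (TP + TN) / (TP + TN + FP + FN)  # accuracy
    denom = math.sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
    MCC = (TP * TN - FP * FN) / denom if denom else 0.0
    return SE, SP, MCC, ACC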