Example #1
def validation_dice(val_dir):
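    """Measure Dice on the validation volumes in val_dir, restoring the
    latest checkpoint before evaluating."""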
    input_image_tensor = tf.placeholder(dtype=tf.float32,
                                        shape=[
                                            None, Config.vox_size[0],
                                            Config.vox_size[1],
                                            Config.vox_size[2], 1
                                        ],
                                        name='image_tensor')
    input_gt_tensor = tf.placeholder(dtype=tf.int32,
                                     shape=[
                                         None, Config.vox_size[0],
                                         Config.vox_size[1], Config.vox_size[2]
                                     ],
                                     name='gt_tensor')
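    # the model returns three prediction heads (pred_last plus two auxiliary outputs)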
    pred_last, pred_6, pred_3 = model(input_image_tensor)
    global_step = tf.train.get_or_create_global_step()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)

        ckpt = tf.train.latest_checkpoint(FLAGS.model_restore_path)
        print('continuing training from previous checkpoint %s' % ckpt)
        start_step = int(os.path.basename(ckpt).split('-')[1])
        variable_restore_op = slim.assign_from_checkpoint_fn(
            ckpt, slim.get_trainable_variables(), ignore_missing_vars=True)
        variable_restore_op(sess)
        sess.run(tf.assign(global_step, start_step))

        # obtain the data of validation
        image_paths = glob(os.path.join(val_dir, 'volume-*.nii'))
        for image_path in image_paths:
            basename = os.path.basename(image_path)
            file_id = basename.split('-')[1].split('.')[0]
            gt_path = os.path.join(val_dir, 'segmentation-' + file_id + '.nii')
            # NOTE: the original call was truncated; the visible arguments are
            # reconstructed from the compute_onefile call in Example #4, and
            # any trailing arguments were lost
            pred_result = compute_onefile(sess, input_image_tensor,
                                          tf.nn.softmax(pred_last))
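
# The Dice computation itself is cut off in this example. A minimal NumPy
# sketch of the per-volume Dice score it presumably accumulates (the name
# dice_score and its arguments are hypothetical):
import numpy as np

def dice_score(pred_mask, gt_mask, label=1):
    # binary masks for the label of interest
    p = (pred_mask == label)
    g = (gt_mask == label)
    denom = p.sum() + g.sum()
    # Dice = 2 * |P & G| / (|P| + |G|); treat empty-vs-empty as a perfect match
    return 1.0 if denom == 0 else 2.0 * np.logical_and(p, g).sum() / denom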
Example #2
def train(args):
    # NOTE: this example's opening was lost in extraction; the function name
    # and the train_iter/train_loader names below are assumed, reconstructed
    # from the commented-out eval_iter variant that follows
    train_iter = torch.utils.data.DataLoader(train_loader,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             num_workers=8,
                                             pin_memory=True)
    """
    eval_iter = torch.utils.data.DataLoader(val_loader,
                                            batch_size=args.batch_size,
                                            shuffle=True,
                                            num_workers=1,
                                            pin_memory=True)
    """
    iter_seed = torch.initial_seed() + 100

    network = AnomalyDetector()
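    # wrap the raw network with the training configuration: loss,
    # checkpoint prefix/frequency, and batch size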
    net = model(
        net=network,
        criterion=RegularizedLoss(network, custom_objective).to(device),
        model_prefix=args.model_dir,
        step_callback_freq=5,
        save_checkpoint_freq=args.save_frequency,
        opt_batch_size=args.batch_size,  # optional, 60 in the paper
    )

    if torch.cuda.is_available():
        net.net.cuda()
        torch.cuda.manual_seed(args.random_seed)
        net.net = torch.nn.DataParallel(net.net).cuda()
    """
    In the original paper:
    lr = 0.01
    epsilon = 1e-8
    """
    # NOTE: the original snippet is truncated here; eps follows the
    # "epsilon = 1e-8" paper note above
    optimizer = torch.optim.Adadelta(net.net.parameters(),
                                     lr=args.lr_base,
                                     eps=1e-8)
Example #3
def pred(args):
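    """Recognize the text in one image: run the encoder once, then decode
    greedily character by character, starting from STX, until ETX is
    predicted or conf.MAX_SEQUENCE is reached."""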
    charset = label_utils.get_charset(conf.CHARSET)
    CHARSET_SIZE = len(charset)

    # define the models
    _, decoder_model, encoder_model = _model.model(conf, args)

    # load each model's weights
    encoder_model.load_model(args.model)
    decoder_model.load_model(args.model)
    logger.info("加载了模型:%s", args.model)

    logger.info("开始预测图片:%s", args.image)
    image = cv2.imread(args.image)

    # run the encoder first
    encoder_out_states, encoder_fwd_state, encoder_back_state = encoder_model.predict(
        image)

    # prepare the decoder's initial state (concat of the encoder's forward/backward states)
    decoder_init_state = np.concatenate(
        [encoder_fwd_state, encoder_back_state], axis=-1)

    attention_weights = []

    # decoding starts from STX (start-of-text)
    from utils.label.label_utils import convert_to_id
    decoder_index = convert_to_id([conf.CHAR_STX], charset)
    decoder_state = decoder_init_state

    result = ""

    # predict characters one step at a time
    for i in range(conf.MAX_SEQUENCE):

        # despite the padding machinery, this is really just one character;
        # padding only fills out the declared input length
        # (np.atleast_2d keeps pad_sequences' input 2-D, since decoder_index
        # becomes a scalar after the first step)
        decoder_inputs = pad_sequences(np.atleast_2d(decoder_index),
                                       maxlen=conf.MAX_SEQUENCE,
                                       padding="post",
                                       value=0)
        decoder_inputs = to_categorical(decoder_inputs,
                                        num_classes=CHARSET_SIZE)

        # infer_decoder_model : Model(inputs=[decoder_inputs, encoder_out_states,decoder_init_state],
        # outputs=[decoder_pred,attn_states,decoder_state])
        # encoder_out_states -> consumed by the attention layer
        # feed the previous character, the encoder outputs (for attention),
        # and the running decoder state, matching the model inputs above
        decoder_out, attention, decoder_state = \
            decoder_model.predict([decoder_inputs, encoder_out_states,
                                   decoder_state])

        # beam-search seed: the top-3 candidate ids (argsort is ascending,
        # so take the last three and reverse)
        max_k_index = decoder_out.flatten().argsort()[-3:][::-1]
        max_prob = decoder_out.flatten()[max_k_index]
        max_labels = label_utils.id2strs(max_k_index)  # TODO: full beam search not implemented

        # the output at this step is a probability distribution over the
        # 3770-character charset, so argmax yields a single character id
        decoder_index = np.argmax(decoder_out, axis=-1)[0, 0]

        if decoder_index == 2:  # id 2 is conf.CHAR_ETX (end-of-text)
            logger.info("Predicted ETX, stopping")
            break

        attention_weights.append(attention)

        pred_char = label_utils.ids2str(decoder_index, charset=charset)

        logger.info("预测字符为:%s", pred_char)
        result += pred_char

    if len(result) >= conf.MAX_SEQUENCE:
        logger.debug("Prediction: %s (reached the maximum sequence length)", result)
    else:
        logger.debug("Prediction: %s (decoding ended at ETX)", result)

    return result, attention_weights
Example #4
        #     if step % FLAGS.snap_interval == 0:
        #         print('model saving in ', os.path.join(FLAGS.model_save_path, 'model.ckpt'))
        #         saver.save(sess, os.path.join(FLAGS.model_save_path, 'model.ckpt'), global_step=global_step)
        # train_summary.close()
        # val_summary.close()


if __name__ == '__main__':
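    # inference entry point: rebuild the graph, restore the latest
    # checkpoint, and run compute_onefile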
    input_image_tensor = tf.placeholder(dtype=tf.float32,
                                        shape=[
                                            None, Config.vox_size[0],
                                            Config.vox_size[1],
                                            Config.vox_size[2], 1
                                        ],
                                        name='image_tensor')
    pred_last, pred_6, pred_3 = model(input_image_tensor)
    global_step = tf.train.get_or_create_global_step()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)

        ckpt = tf.train.latest_checkpoint(FLAGS.model_restore_path)
        print('continuing training from previous checkpoint %s' % ckpt)
        start_step = int(os.path.basename(ckpt).split('-')[1])
        variable_restore_op = slim.assign_from_checkpoint_fn(
            ckpt, slim.get_trainable_variables(), ignore_missing_vars=True)
        variable_restore_op(sess)
        sess.run(tf.assign(global_step, start_step))

        # NOTE: the original snippet is truncated here; any trailing
        # arguments to compute_onefile were lost
        pred_mask = compute_onefile(
            sess, input_image_tensor, tf.nn.softmax(pred_last))
Example #5
def train():
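    """Build the 3-D segmentation graph (three prediction heads with
    auxiliary cross-entropy losses), then train with periodic validation,
    summaries, and checkpointing."""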
    input_image_tensor = tf.placeholder(dtype=tf.float32,
                                        shape=[None, Config.vox_size[0], Config.vox_size[1], Config.vox_size[2], 1],
                                        name='image_tensor')
    input_gt_tensor = tf.placeholder(dtype=tf.int32,
                                     shape=[None, Config.vox_size[0], Config.vox_size[1], Config.vox_size[2]],
                                     name='gt_tensor')
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step, decay_steps=10000, decay_rate=0.1,
                                               staircase=True)
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    pred_last, pred_6, pred_3 = model(input_image_tensor)
    pred_last_softmax = tf.nn.softmax(pred_last)
    model_loss, cross_entropy_last, cross_entropy_6, cross_entropy_3 = build_loss(input_gt_tensor, pred_last, pred_6,
                                                                                  pred_3, global_step=global_step)
    total_loss = tf.add_n([model_loss] + tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
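    # image summaries: slices 10..14 of the first volume, for the input,
    # the ground truth, and each prediction head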
    tf.summary.image('image', tf.transpose(input_image_tensor[0, :, :, 10:15], perm=[2, 0, 1, 3]), max_outputs=3)
    tf.summary.image('gt',
                     tf.expand_dims(tf.transpose(tf.cast(input_gt_tensor, tf.float32)[0, :, :, 10:15], perm=[2, 0, 1]),
                                    axis=3),
                     max_outputs=3)
    tf.summary.image('pred_last',
                     tf.expand_dims(
                         tf.transpose(tf.cast(tf.argmax(tf.nn.softmax(pred_last)[0, :, :, 10:15, :], axis=3), tf.uint8),
                                      perm=[2, 0, 1]),
                         axis=3),
                     max_outputs=3)
    tf.summary.image('pred_6',
                     tf.expand_dims(
                         tf.transpose(tf.cast(tf.argmax(tf.nn.softmax(pred_6)[0, :, :, 10:15, :], axis=3), tf.uint8),
                                      perm=[2, 0, 1]),
                         axis=3),
                     max_outputs=3)
    tf.summary.image('pred_3',
                     tf.expand_dims(
                         tf.transpose(tf.cast(tf.argmax(tf.nn.softmax(pred_3)[0, :, :, 10:15, :], axis=3), tf.uint8),
                                      perm=[2, 0, 1]),
                         axis=3),
                     max_outputs=3)
    tf.summary.scalar('loss/model_loss', model_loss)
    tf.summary.scalar('loss/total_loss', total_loss)
    tf.summary.scalar('loss/cross_entropy_last', cross_entropy_last)
    tf.summary.scalar('loss/cross_entropy_6', cross_entropy_6)
    tf.summary.scalar('loss/cross_entropy_3', cross_entropy_3)
    # grads = opt.compute_gradients(total_loss)
    # apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    apply_gradient_op = opt.minimize(total_loss, global_step=global_step)
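    # keep an exponential moving average of all trainable variables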
    variable_averages = tf.train.ExponentialMovingAverage(
        FLAGS.moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([variables_averages_op, apply_gradient_op]):
        train_op = tf.no_op(name='train_op')
    train_summary = tf.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'train'), tf.get_default_graph())
    val_summary = tf.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'val'), tf.get_default_graph())
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        # Reader1 Version
        # reader = Reader(
        #     '/home/give/Documents/dataset/ISBI2017/media/nas/01_Datasets/CT/LITS/Training Batch 2',
        #     '/home/give/Documents/dataset/ISBI2017/media/nas/01_Datasets/CT/LITS/Training Batch 1',
        #     batch_size=FLAGS.batch_size
        # )
        # train_generator = reader.train_generator
        # val_generator = reader.val_generator
        reader = Reader2(
            '/home/give/Documents/dataset/ISBI2017/media/nas/01_Datasets/CT/LITS/Training Batch 2_Patch',
            '/home/give/Documents/dataset/ISBI2017/media/nas/01_Datasets/CT/LITS/Training Batch 1_Patch',
            batch_size=FLAGS.batch_size
        )
        start_step = 0
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
        if FLAGS.restore_flag:
            ckpt = tf.train.latest_checkpoint(FLAGS.model_restore_path)
            print('continuing training from previous checkpoint %s' % ckpt)
            start_step = int(os.path.basename(ckpt).split('-')[1])
            variable_restore_op = slim.assign_from_checkpoint_fn(ckpt,
                                                                 slim.get_trainable_variables(),
                                                                 ignore_missing_vars=True)
            variable_restore_op(sess)
            sess.run(tf.assign(global_step, start_step))
        print('Start iterator!')
        for step in range(start_step, FLAGS.max_steps + start_step):
            # train_img_path_batch, train_gt_path_batch = train_generator.__next__()
            # train_image_batch, train_gt_batch = Reader.processing1(train_img_path_batch, train_gt_path_batch,
            #                                                        crop_num=FLAGS.crop_num)
            print('Start Reading')
            train_image_batch, train_gt_batch = reader.get_next_batch(is_training=True)
            print('Batch Images: ', np.shape(train_image_batch))
            # print(np.shape(train_image_batch))
            # train_image_batch, train_gt_batch = Reader.processing2(train_img_path_batch, train_gt_path_batch,
            #                                                        slice_num=FLAGS.slice_num)
            # pred_last_value, pred_6_value, pred_3_value = sess.run([pred_last, pred_6, pred_3], feed_dict={
            #     input_image_tensor: np.expand_dims(train_image_batch, axis=4)
            # })
            # print('Pred last, max: {}, min: {}.'.format(np.max(pred_last_value), np.min(pred_last_value)))
            # print('Pred 6, max: {}, min: {}.'.format(np.max(pred_6_value), np.min(pred_6_value)))
            # print('Pred 3, max: {}, min: {}.'.format(np.max(pred_3_value), np.min(pred_3_value)))
            # print('InputImage, max: {}, min: {}'.format(np.max(train_image_batch), np.min(train_image_batch)))
            (_, pred_last_softmax_value, total_loss_value, model_loss_value,
             cross_entropy_last_value, cross_entropy_6_value,
             cross_entropy_3_value, learning_rate_value, summary_value) = sess.run(
                [train_op, pred_last_softmax, total_loss, model_loss,
                 cross_entropy_last, cross_entropy_6, cross_entropy_3,
                 learning_rate, summary_op],
                feed_dict={
                    input_image_tensor: np.expand_dims(train_image_batch, axis=4),
                    input_gt_tensor: train_gt_batch
                })
            train_summary.add_summary(summary_value, global_step=step)
            if step % FLAGS.print_interval == 0:
                # pred_last_value, pred_6_value, pred_3_value = sess.run([pred_last, pred_6, pred_3], feed_dict={
                #     input_image_tensor: np.expand_dims(train_image_batch, axis=4)
                # })
                # print('Pred last, max: {}, min: {}.'.format(np.max(pred_last_value), np.min(pred_last_value)))
                # print('Pred 6, max: {}, min: {}.'.format(np.max(pred_6_value), np.min(pred_6_value)))
                # print('Pred 3, max: {}, min: {}.'.format(np.max(pred_3_value), np.min(pred_3_value)))
                # print('InputImage, max: {}, min: {}'.format(np.max(train_image_batch), np.min(train_image_batch)))
                print(
                    'Training, Step: {}, total loss: {:.4f}, model loss: {:.04f}, cross_entropy_last: {:.4f}, cross_entropy_6: {:.4f}, cross_entropy_3: {:.4f}, learning rate: {:.7f}'.format(
                        step,
                        total_loss_value,
                        model_loss_value,
                        cross_entropy_last_value,
                        cross_entropy_6_value,
                        cross_entropy_3_value,
                        learning_rate_value))
                print('class 0 prob, max/min: ',
                      np.max(pred_last_softmax_value[:, :, :, :, 0]),
                      np.min(pred_last_softmax_value[:, :, :, :, 0]))
                print('class 1 prob, max/min: ',
                      np.max(pred_last_softmax_value[:, :, :, :, 1]),
                      np.min(pred_last_softmax_value[:, :, :, :, 1]))

            if step % FLAGS.val_interval == 0:
                # val_img_path_batch, val_gt_path_batch = val_generator.__next__()
                # val_image_batch, val_gt_batch = Reader.processing1(val_img_path_batch, val_gt_path_batch,
                #                                                    crop_num=FLAGS.crop_num)
                val_image_batch, val_gt_batch = reader.get_next_batch(is_training=False)
                # val_image_batch, val_gt_batch = Reader.processing2(val_img_path_batch, val_gt_path_batch,
                #                                                    slice_num=FLAGS.slice_num)
                total_loss_value, model_loss_value, learning_rate_value, summary_value = sess.run(
                    [total_loss, model_loss, learning_rate, summary_op], feed_dict={
                        input_image_tensor: np.expand_dims(val_image_batch, axis=4),
                        input_gt_tensor: val_gt_batch
                    })
                val_summary.add_summary(summary_value, global_step=step)
                print(
                    'Validation, Step: {}, total loss: {:.4f}, model loss: {:.04f}, learning rate: {:.7f}'.format(step,
                                                                                                                  total_loss_value,
                                                                                                                  model_loss_value,
                                                                                                                  learning_rate_value))
            if step % FLAGS.snap_interval == 0:
                print('model saving in ', os.path.join(FLAGS.model_save_path, 'model.ckpt'))
                saver.save(sess, os.path.join(FLAGS.model_save_path, 'model.ckpt'), global_step=global_step)
        train_summary.close()
        val_summary.close()
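
# build_loss is not shown in these examples. A minimal sketch consistent
# with its call signature in train() above -- one sparse cross-entropy per
# prediction head, combined with hypothetical auxiliary weights (the real
# weighting, and any use of global_step, may differ):
def build_loss_sketch(gt, pred_last, pred_6, pred_3, global_step=None):
    def ce(logits):
        # gt: [N, X, Y, Z] int labels; logits: [N, X, Y, Z, num_classes]
        return tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=gt,
                                                           logits=logits))
    cross_entropy_last = ce(pred_last)
    cross_entropy_6 = ce(pred_6)
    cross_entropy_3 = ce(pred_3)
    # weight the auxiliary heads less than the final head (weights assumed)
    model_loss = (cross_entropy_last
                  + 0.5 * cross_entropy_6
                  + 0.25 * cross_entropy_3)
    return model_loss, cross_entropy_last, cross_entropy_6, cross_entropy_3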