Code Example #1
File: LeNet_2.py  Project: UpCoder/MedicalImage
def val(val_data_set, load_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    global_step = tf.Variable(0, trainable=False)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # sess.run(tf.global_variables_initializer())

        if load_model_path:
            # load(load_model_path, sess)
            # with tf.variable_scope('conv1_1', reuse=True):
            #     weights1 = tf.get_variable('weights')
            #     print weights1.eval(sess)
            saver.restore(sess, load_model_path)
        else:
            sess.run(tf.global_variables_initializer())

        validation_images, validation_labels = val_data_set.images, val_data_set.labels
        validation_images = changed_shape(validation_images, [
            len(validation_images), sub_Config.IMAGE_W, sub_Config.IMAGE_W, 1
        ])
        # validation_labels[validation_labels == 1] = 0
        # validation_labels[validation_labels == 3] = 0
        # validation_labels[validation_labels == 4] = 1
        # validation_labels[validation_labels == 2] = 1
        validation_accuracy, logits = sess.run([accuracy_tensor, y],
                                               feed_dict={
                                                   x: validation_images,
                                                   y_: validation_labels
                                               })
        _, _, _, error_indexs, error_record = calculate_acc_error(
            logits=np.argmax(logits, 1), label=validation_labels, show=True)
        recall, precision, f1_score = get_game_evaluate(
            np.argmax(logits, 1), validation_labels)
        validation_labels = np.array(validation_labels)
        print 'label=0 %d, label=1 %d' % (np.sum(validation_labels == 0),
                                          np.sum(validation_labels == 1))
        print 'recall is %g, precision is %g, f1_score is %g' % (
            recall, precision, f1_score)
        print 'accuracy is %g' % \
              (validation_accuracy)
        return error_indexs, error_record
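
Note: every example on this page reads its hyper-parameters from a sub_Config module that is not shown. A minimal stand-in, assuming only the attribute names used in the snippets above and below (the values are illustrative guesses, not the project's real settings), might look like:

# Hypothetical stand-in for the project's sub_Config module; the attribute
# names are taken from the examples on this page, the values are guesses.
class sub_Config(object):
    IMAGE_W = 45                 # image width (the code also reuses it as the height)
    IMAGE_H = 45
    IMAGE_CHANNEL = 1            # single-channel medical images
    OUTPUT_NODE = 2              # number of classes
    REGULARIZTION_RATE = 1e-4    # (sic) L2 regularization weight
    LEARNING_RATE = 1e-3
    BATCH_SIZE = 64
    BATCH_DISTRIBUTION = None    # per-class batch make-up; exact format unknown
    ITERATOE_NUMBER = 10000      # (sic) number of training iterations
    MODEL_SAVE_PATH = './model/model.ckpt'
    TRAIN_LOG_DIR = './log/train'
    VAL_LOG_DIR = './log/val'
    NEED_MUL = False             # whether inputs need rescaling for display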
Code Example #2
File: ResNet.py  Project: UpCoder/MedicalImage
def val(val_data_set, load_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    # global_step = tf.Variable(0, trainable=False)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x,
                        is_training=is_training,
                        # the flag means "use batch norm, otherwise use biases",
                        # so biases are enabled when the flag is off
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=(not FLAGS.use_bn),
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)
    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if load_model_path:
            saver.restore(sess, load_model_path)
        validation_images, validation_labels = val_data_set.images, val_data_set.labels
        validation_images = changed_shape(validation_images, [
            len(validation_images), sub_Config.IMAGE_W, sub_Config.IMAGE_W, 1
        ])
        validation_accuracy, validation_loss, logits = sess.run(
            [accuracy_tensor, loss_, y],
            feed_dict={
                x: validation_images,
                y_: validation_labels
            })
        _, _, _, error_indexs, error_record = calculate_acc_error(
            logits=np.argmax(logits, 1), label=validation_labels, show=True)
        print 'validation loss value is %g, accuracy is %g' % \
              (validation_loss, validation_accuracy)
        return error_indexs, error_record
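
The loss() helper called above is not shown on this page; Code Example #6 below inlines what looks like the same computation, so a plausible reconstruction (an assumption, not the project's verbatim code) is:

# Plausible reconstruction of the loss() helper, modeled on the inlined
# version in Code Example #6; assumes L2 terms are collected under 'losses'.
def loss(logits, labels):
    cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels))
    return cross_entropy + tf.add_n(tf.get_collection('losses'))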
Code Example #3
File: LeNet.py  Project: UpCoder/MedicalImage
def val(dataset, load_model_path, save_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        validation_images, validation_labels = dataset.images, dataset.labels
        validation_images = changed_shape(validation_images, [
            len(validation_images), sub_Config.IMAGE_W, sub_Config.IMAGE_W, 1
        ])
        validation_accuracy, logits = sess.run([accuracy_tensor, y],
                                               feed_dict={
                                                   x: validation_images,
                                                   y_: validation_labels
                                               })
        _, _, _, error_indexs, error_record = calculate_acc_error(
            logits=np.argmax(logits, 1), label=validation_labels, show=True)
        print 'accuracy is %g' % \
              (validation_accuracy)
        return error_indexs, error_record
Code Example #4
def val(dataset, load_model_path, save_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y, features = inference(x, regularizer, return_feature=True)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        validation_images, validation_labels = dataset.images, dataset.labels
        validation_images = changed_shape(validation_images, [
            len(validation_images), sub_Config.IMAGE_W, sub_Config.IMAGE_W, 1
        ])
        validation_accuracy, features_value = sess.run(
            [accuracy_tensor, features],
            feed_dict={
                x: validation_images,
                y_: validation_labels
            })
        print validation_accuracy
        return features_value
Code Example #5
File: fine_tuning.py  Project: UpCoder/MedicalImage
def train(train_data_set, val_data_set, load_model_path, save_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    # regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x,
                        is_training=is_training,
                        # the flag means "use batch norm, otherwise use biases",
                        # so biases are enabled when the flag is off
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=(not FLAGS.use_bn),
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(
            loss=loss_, global_step=global_step)
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        writer = tf.summary.FileWriter('./log/fine_tuning/train',
                                       tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val',
                                           tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.get_next_batch(
                sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
            images = changed_shape(images, [
                len(images), sub_Config.IMAGE_W, sub_Config.IMAGE_W,
                sub_Config.IMAGE_CHANNEL
            ])
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss_, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                })
            writer.add_summary(summary=summary, global_step=global_step_value)
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # save the model
                import os
                saved_path = os.path.join(save_model_path,
                                          str(global_step_value))
                if not os.path.exists(saved_path):
                    os.mkdir(saved_path)
                saved_path += '/model.ckpt'
                saver.save(sess, saved_path, global_step=global_step_value)
            if i % 100 == 0:
                validation_images, validation_labels = val_data_set.get_next_batch(
                    sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
                validation_images = changed_shape(validation_images, [
                    len(validation_images), sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W, 1
                ])
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss_, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    })
                calculate_acc_error(logits=np.argmax(logits, 1),
                                    label=validation_labels,
                                    show=True)
                binary_acc = acc_binary_acc(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                )
                val_writer.add_summary(summary, global_step_value)
                print 'step is %d, training loss value is %g, accuracy is %g, ' \
                      'validation loss value is %g, accuracy is %g, binary_acc is %g' % \
                      (global_step_value, loss_value, accuracy_value, validation_loss, validation_accuracy, binary_acc)
        writer.close()
        val_writer.close()
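
This variant writes a numbered checkpoint directory every 500 steps (save_model_path/<global_step>/model.ckpt-<global_step>). A small helper along these lines (a sketch assuming that directory layout) can recover the newest checkpoint so it can be passed as load_model_path to the val() functions above:

import os
import tensorflow as tf

def latest_model_path(save_model_path):
    # each save creates a sub-directory named after the global step,
    # so the highest-numbered directory holds the newest checkpoint
    steps = sorted(int(d) for d in os.listdir(save_model_path) if d.isdigit())
    step_dir = os.path.join(save_model_path, str(steps[-1]))
    return tf.train.latest_checkpoint(step_dir)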
Code Example #6
File: train.py  Project: UpCoder/MedicalImage
def train(dataset, load_model_path, save_model_path, train_log_dir, val_log_dir):
    x = tf.placeholder(
        tf.float32,
        shape=[
            None,
            sub_Config.IMAGE_W,
            sub_Config.IMAGE_H,
            sub_Config.IMAGE_CHANNEL
        ],
        name='input_x'
    )
    y_ = tf.placeholder(
        tf.float32,
        shape=[
            None,
        ]
    )
    tf.summary.histogram(
        'label',
        y_
    )
    # global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # variable_averages_op = variable_average.apply(tf.trainable_variables())
    regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)
    tf.summary.histogram(
        'logits',
        tf.argmax(y, 1)
    )
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y,
            labels=tf.cast(y_, tf.int32)
        )
    ) + tf.add_n(tf.get_collection('losses'))
    tf.summary.scalar(
        'loss',
        loss
    )
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE
    ).minimize(
        loss=loss,
        # global_step=global_step
    )
    # with tf.control_dependencies([train_step, variable_averages_op]):
    #     train_op = tf.no_op(name='train')
    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(
                tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                tf.float32
            )
        )
        tf.summary.scalar(
            'accuracy',
            accuracy_tensor
        )
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        writer = tf.summary.FileWriter(train_log_dir, tf.get_default_graph())
        val_writer = tf.summary.FileWriter(val_log_dir, tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = dataset.get_next_train_batch(sub_Config.BATCH_SIZE)
            images = changed_shape(images, [
                    len(images),
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_CHANNEL
                ])
            if i == 0:
                from PIL import Image
                image = Image.fromarray(np.asarray(images[0, :, :, 0], np.uint8))
                image.show()
            _, loss_value, accuracy_value, summary = sess.run(
                [train_op, loss, accuracy_tensor, merge_op],
                feed_dict={
                    x: images,
                    y_: labels
                }
            )
            writer.add_summary(
                summary=summary,
                global_step=i
            )
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # save the model; use a fresh local path so the '_<step>'
                # suffixes do not keep accumulating on save_model_path
                import os
                current_save_path = save_model_path + '_' + str(i)
                if not os.path.exists(current_save_path):
                    os.mkdir(current_save_path)
                current_save_path += '/'
                saver.save(sess, current_save_path)
            if i % 100 == 0:
                validation_images, validation_labels = dataset.get_next_val_batch(sub_Config.BATCH_SIZE)
                validation_images = changed_shape(
                    validation_images,
                    [
                        len(validation_images),
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_CHANNEL
                    ]
                )
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    }
                )
                calculate_acc_error(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                    show=True
                )
                val_writer.add_summary(summary, i)
                print 'step is %d, training loss value is %g, accuracy is %g, ' \
                      'validation loss value is %g, accuracy is %g' % \
                      (i, loss_value, accuracy_value, validation_loss, validation_accuracy)
        writer.close()
        val_writer.close()
Code Example #7
def train(dataset, load_model=False):
    x1 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[0][0],
                            sub_Config.sizes[0][1], sub_Config.sizes[0][2]
                        ],
                        name='input_x1')
    x2 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[1][0],
                            sub_Config.sizes[1][1], sub_Config.sizes[1][2]
                        ],
                        name='input_x2')
    x3 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[2][0],
                            sub_Config.sizes[2][1], sub_Config.sizes[2][2]
                        ],
                        name='input_x3')

    bg1 = tf.placeholder(tf.float32,
                         shape=[
                             sub_Config.BATCH_SIZE, sub_Config.bg_sizes[0][0],
                             sub_Config.bg_sizes[0][1],
                             sub_Config.bg_sizes[0][2]
                         ],
                         name='input_bg1')
    bg2 = tf.placeholder(tf.float32,
                         shape=[
                             sub_Config.BATCH_SIZE, sub_Config.bg_sizes[1][0],
                             sub_Config.bg_sizes[1][1],
                             sub_Config.bg_sizes[1][2]
                         ],
                         name='input_bg2')
    bg3 = tf.placeholder(tf.float32,
                         shape=[
                             sub_Config.BATCH_SIZE, sub_Config.bg_sizes[2][0],
                             sub_Config.bg_sizes[2][1],
                             sub_Config.bg_sizes[2][2]
                         ],
                         name='input_bg3')
    tf.summary.image('input_x1', x1, max_outputs=5)
    tf.summary.image('input_x2', x2, max_outputs=5)
    tf.summary.image('input_x3', x3, max_outputs=5)
    tf.summary.image('input_bg1', bg1, max_outputs=5)
    tf.summary.image('input_bg2', bg2, max_outputs=5)
    tf.summary.image('input_bg3', bg3, max_outputs=5)
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference([x1, x2, x3], [bg1, bg2, bg3], regularizer)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.cast(y_, tf.int32))) + tf.add_n(
                tf.get_collection('losses'))
    tf.summary.scalar('loss', loss)
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(loss=loss)
    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        if load_model:
            saver.restore(sess, sub_Config.MODEL_SAVE_PATH)
        writer = tf.summary.FileWriter(sub_Config.TRAIN_LOG_DIR,
                                       tf.get_default_graph())
        val_writer = tf.summary.FileWriter(sub_Config.VAL_LOG_DIR,
                                           tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            # images, labels = dataset.train.next_batch(sub_Config.BATCH_SIZE)
            # labels = np.argmax(labels, 1)
            # # print np.shape(labels)
            # images = np.reshape(
            #     images,
            #     [
            #         sub_Config.BATCH_SIZE,
            #         sub_Config.IMAGE_W,
            #         sub_Config.IMAGE_H,
            #         sub_Config.IMAGE_CHANNEL
            #     ]
            # )
            images, labels, bgs = dataset.next_train_batch(
                sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
            images1 = images[:, 0, :]
            images2 = images[:, 1, :]
            images3 = images[:, 2, :]
            images1 = changed_shape(images1, [
                sub_Config.BATCH_SIZE, sub_Config.sizes[0][0],
                sub_Config.sizes[0][1], sub_Config.sizes[0][2]
            ])
            images2 = changed_shape(images2, [
                sub_Config.BATCH_SIZE, sub_Config.sizes[1][0],
                sub_Config.sizes[1][1], sub_Config.sizes[1][2]
            ])
            images3 = changed_shape(images3, [
                sub_Config.BATCH_SIZE, sub_Config.sizes[2][0],
                sub_Config.sizes[2][1], sub_Config.sizes[2][2]
            ])

            input_bg1 = bgs[:, 0, :]
            input_bg2 = bgs[:, 1, :]
            input_bg3 = bgs[:, 2, :]
            input_bg1 = changed_shape(input_bg1, [
                sub_Config.BATCH_SIZE, sub_Config.bg_sizes[0][0],
                sub_Config.bg_sizes[0][1], sub_Config.bg_sizes[0][2]
            ])
            input_bg2 = changed_shape(input_bg2, [
                sub_Config.BATCH_SIZE, sub_Config.bg_sizes[1][0],
                sub_Config.bg_sizes[1][1], sub_Config.bg_sizes[1][2]
            ])
            input_bg3 = changed_shape(input_bg3, [
                sub_Config.BATCH_SIZE, sub_Config.bg_sizes[2][0],
                sub_Config.bg_sizes[2][1], sub_Config.bg_sizes[2][2]
            ])
            if i == 0:
                from PIL import Image
                image = Image.fromarray(
                    np.asarray(images3[0, :, :, 0], np.uint8))
                image.show()
            # images = np.reshape(
            #     images[:, :, :, 2],
            #     [
            #         sub_Config.BATCH_SIZE,
            #         sub_Config.IMAGE_W,
            #         sub_Config.IMAGE_W,
            #         1
            #     ]
            # )
            _, loss_value, accuracy_value, summary = sess.run(
                [train_op, loss, accuracy_tensor, merge_op],
                feed_dict={
                    x1: images1,
                    x2: images2,
                    x3: images3,
                    bg1: input_bg1,
                    bg2: input_bg2,
                    bg3: input_bg3,
                    y_: labels
                })
            writer.add_summary(summary=summary, global_step=i)
            if i % 1000 == 0 and i != 0:
                # save the model
                saver.save(sess, sub_Config.MODEL_SAVE_PATH)
            if i % 100 == 0:
                validation_images, validation_labels, bgs = dataset.next_val_batch(
                    sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
                images1 = validation_images[:, 0, :]
                images2 = validation_images[:, 1, :]
                images3 = validation_images[:, 2, :]
                images1 = changed_shape(images1, [
                    len(validation_images), sub_Config.sizes[0][0],
                    sub_Config.sizes[0][1], sub_Config.sizes[0][2]
                ])
                images2 = changed_shape(images2, [
                    len(validation_images), sub_Config.sizes[1][0],
                    sub_Config.sizes[1][1], sub_Config.sizes[1][2]
                ])
                images3 = changed_shape(images3, [
                    len(validation_images), sub_Config.sizes[2][0],
                    sub_Config.sizes[2][1], sub_Config.sizes[2][2]
                ])
                input_bg1 = bgs[:, 0, :]
                input_bg2 = bgs[:, 1, :]
                input_bg3 = bgs[:, 2, :]
                input_bg1 = changed_shape(input_bg1, [
                    sub_Config.BATCH_SIZE, sub_Config.bg_sizes[0][0],
                    sub_Config.bg_sizes[0][1], sub_Config.bg_sizes[0][2]
                ])
                input_bg2 = changed_shape(input_bg2, [
                    sub_Config.BATCH_SIZE, sub_Config.bg_sizes[1][0],
                    sub_Config.bg_sizes[1][1], sub_Config.bg_sizes[1][2]
                ])
                input_bg3 = changed_shape(input_bg3, [
                    sub_Config.BATCH_SIZE, sub_Config.bg_sizes[2][0],
                    sub_Config.bg_sizes[2][1], sub_Config.bg_sizes[2][2]
                ])
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss, merge_op, y],
                    feed_dict={
                        x1: images1,
                        x2: images2,
                        x3: images3,
                        bg1: input_bg1,
                        bg2: input_bg2,
                        bg3: input_bg3,
                        y_: validation_labels
                    })
                calculate_acc_error(logits=np.argmax(logits, 1),
                                    label=validation_labels,
                                    show=True)
                print 'step is %d, training loss value is %g, accuracy is %g, ' \
                      'validation loss value is %g, accuracy is %g' % \
                      (i, loss_value, accuracy_value, validation_loss, validation_accuracy)
                val_writer.add_summary(summary, i)
        writer.close()
        val_writer.close()
Code Example #8
def train(train_data_set, val_data_set, load_model_path, save_model_path):
    x = tf.placeholder(
        tf.float32,
        shape=[
            None,
            sub_Config.IMAGE_W,
            sub_Config.IMAGE_H,
            sub_Config.IMAGE_CHANNEL
        ],
        name='input_x'
    )
    if sub_Config.NEED_MUL:
        tf.summary.image(
            'input_x',
            x * 120,
            max_outputs=5
        )
    else:
        tf.summary.image(
            'input_x',
            x
        )
    y_ = tf.placeholder(
        tf.float32,
        shape=[
            None,
        ]
    )
    tf.summary.histogram(
        'label',
        y_
    )
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)
    tf.summary.histogram(
        'logits',
        tf.argmax(y, 1)
    )
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y,
            labels=tf.cast(y_, tf.int32)
        )
    ) + tf.add_n(tf.get_collection('losses'))
    tf.summary.scalar(
        'loss',
        loss
    )
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE
    ).minimize(
        loss=loss,
        global_step=global_step
    )
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(
                tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                tf.float32
            )
        )
        tf.summary.scalar(
            'accuracy',
            accuracy_tensor
        )
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        # sess.run(tf.global_variables_initializer())

        if load_model_path:
            # load(load_model_path, sess)
            # with tf.variable_scope('conv1_1', reuse=True):
            #     weights1 = tf.get_variable('weights')
            #     print weights1.eval(sess)
            saver.restore(sess, load_model_path)
        else:
            sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('./log/fine_tuning/train', tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val', tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.images, train_data_set.labels
            images = changed_shape(images, [
                    len(images),
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_CHANNEL
                ])
            if i == 0:
                from PIL import Image
                image = Image.fromarray(np.asarray(images[0, :, :, 0], np.uint8))
                image.show()
            # labels[labels == 1] = 0
            # labels[labels == 3] = 0
            # labels[labels == 4] = 1
            # labels[labels == 2] = 1
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                }
            )
            writer.add_summary(
                summary=summary,
                global_step=global_step_value
            )
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # save the model
                save_weights(save_model_path+'model_weights.npy', [
                    'conv1_1',
                    'conv2_1',
                    'conv3_1',
                    'fc1',
                    'fc2'
                ])
                saver.save(sess, save_model_path)
            if i % 100 == 0:
                validation_images, validation_labels = val_data_set.images, val_data_set.labels
                validation_images = changed_shape(
                    validation_images,
                    [
                        len(validation_images),
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_W,
                        1
                    ]
                )
                # validation_labels[validation_labels == 1] = 0
                # validation_labels[validation_labels == 3] = 0
                # validation_labels[validation_labels == 4] = 1
                # validation_labels[validation_labels == 2] = 1
                validation_accuracy, summary, logits = sess.run(
                    [accuracy_tensor, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    }
                )
                calculate_acc_error(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                    show=True
                )
                val_writer.add_summary(summary, global_step_value)
                print 'step is %d, training loss value is %g, accuracy is %g, ' \
                      'validation accuracy is %g' % \
                      (global_step_value, loss_value, accuracy_value, validation_accuracy)
        writer.close()
        val_writer.close()
Code Example #9
def train(dataset):
    x1 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[0][0],
                            sub_Config.sizes[0][1], sub_Config.sizes[0][2]
                        ],
                        name='input_x1')
    x2 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[1][0],
                            sub_Config.sizes[1][1], sub_Config.sizes[1][2]
                        ],
                        name='input_x2')
    x3 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[2][0],
                            sub_Config.sizes[2][1], sub_Config.sizes[2][2]
                        ],
                        name='input_x3')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference_parllel([x1, x2, x3], regularizer)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.cast(y_, tf.int32))) + tf.add_n(
                tf.get_collection('losses'))
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(loss=loss)
    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saver.restore(sess, sub_Config.MODEL_SAVE_PATH)  # load the model
        validation_images, validation_labels = dataset.get_validation_images_labels(
        )
        images1 = validation_images[:, 0, :]
        images2 = validation_images[:, 1, :]
        images3 = validation_images[:, 2, :]
        images1 = changed_shape(images1, [
            len(validation_images), sub_Config.sizes[0][0],
            sub_Config.sizes[0][1], sub_Config.sizes[0][2]
        ])
        images2 = changed_shape(images2, [
            len(validation_images), sub_Config.sizes[1][0],
            sub_Config.sizes[1][1], sub_Config.sizes[1][2]
        ])
        images3 = changed_shape(images3, [
            len(validation_images), sub_Config.sizes[2][0],
            sub_Config.sizes[2][1], sub_Config.sizes[2][2]
        ])
        validation_accuracy, validation_loss, summary, logits = sess.run(
            [accuracy_tensor, loss, merge_op, y],
            feed_dict={
                x1: images1,
                x2: images2,
                x3: images3,
                y_: validation_labels
            })
        calculate_acc_error(logits=np.argmax(logits, 1),
                            label=validation_labels,
                            show=True)
Code Example #10
File: resnet_train.py  Project: UpCoder/MedicalImage
def train(train_generator, val_generator, logits, images_tensor, labeles):
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    val_step = tf.get_variable('val_step', [],
                               initializer=tf.constant_initializer(0),
                               trainable=False)
    loss_ = loss(logits, tf.cast(labeles, tf.int64))
    predictions = tf.nn.softmax(logits)
    top1_error = top_k_error(predictions, labeles, k=1)

    # loss_avg
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss_]))
    tf.summary.scalar('loss_avg', ema.average(loss_))

    # validation stats
    ema = tf.train.ExponentialMovingAverage(0.9, val_step)
    val_op = tf.group(val_step.assign_add(1), ema.apply([top1_error]))
    top1_error_avg = ema.average(top1_error)
    tf.summary.scalar('val_top1_error_avg', top1_error_avg)

    tf.summary.scalar('learning_rate', FLAGS.learning_rate)

    opt = tf.train.MomentumOptimizer(FLAGS.learning_rate, MOMENTUM)
    grads = opt.compute_gradients(loss_)
    for grad, var in grads:
        if grad is not None and not FLAGS.minimal_summaries:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    if not FLAGS.minimal_summaries:
        # Display the training images in the visualizer.
        tf.summary.image('images', images_tensor)

        # summarize trainable variables as histograms rather than images
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

    batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, batchnorm_updates_op)

    saver = tf.train.Saver(tf.global_variables())

    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()

    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(FLAGS.log_val_dir, sess.graph)
    if FLAGS.resume:
        latest = tf.train.latest_checkpoint(FLAGS.save_model_path)
        if not latest:
            print "No checkpoint to continue from in", FLAGS.train_dir
            sys.exit(1)
        print "resume", latest
        saver.restore(sess, latest)

    for x in xrange(FLAGS.max_steps + 1):
        start_time = time.time()

        step = sess.run(global_step)
        # train_images, train_labels = train_generator.get_next_batch(net_Config.BATCH_SIZE, net_Config.BATCH_DISTRIBUTION)
        train_images, train_labels = train_generator.get_next_batch(None, None)
        train_images = changed_shape(
            train_images,
            [len(train_images), net_Config.IMAGE_W, net_Config.IMAGE_W, 1])
        write_summary = step % 100 == 0 and step > 1
        i = [train_op, loss_]
        i.append(summary_op)
        i.append(labeles)
        o = sess.run(i, {images_tensor: train_images, labeles: train_labels})
        loss_value = o[1]

        duration = time.time() - start_time

        assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

        if step % 5 == 0:
            top1_error_value = sess.run(top1_error,
                                        feed_dict={
                                            images_tensor: train_images,
                                            labeles: train_labels
                                        })
            examples_per_sec = FLAGS.batch_size / float(duration)
            format_str = (
                'step %d loss = %.2f, top1 error = %g (%.1f examples/sec; %.3f '
                'sec/batch)')
            print(format_str % (step, loss_value, top1_error_value,
                                examples_per_sec, duration))
        if write_summary:
            summary_str = o[2]
            summary_writer.add_summary(summary_str, step)

        # Save the model checkpoint periodically.
        if step > 1 and step % 100 == 0:
            checkpoint_path = os.path.join(FLAGS.save_model_path, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=global_step)

        # Run validation periodically
        if step > 1 and step % 100 == 0:
            val_images, val_labels = val_generator.get_next_batch(
                net_Config.BATCH_SIZE, net_Config.BATCH_DISTRIBUTION)
            val_images = changed_shape(
                val_images,
                [len(val_images), net_Config.IMAGE_W, net_Config.IMAGE_W, 1])
            _, top1_error_value, summary_value = sess.run(
                [val_op, top1_error, summary_op], {
                    images_tensor: val_images,
                    labeles: val_labels
                })
            print('Validation top1 error %.2f' % top1_error_value)
            val_summary_writer.add_summary(summary_value, step)
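
top_k_error() is defined elsewhere in resnet_train.py and is not shown here. Judging from how it is used (a per-batch value averaged into val_top1_error_avg), it presumably returns the fraction of samples whose true label is not among the top-k predictions, along these lines (a sketch, not the repository's verbatim helper):

def top_k_error(predictions, labels, k):
    # fraction of the batch whose label is NOT in the top-k predictions
    batch_size = tf.cast(tf.shape(predictions)[0], tf.float32)
    in_top_k = tf.cast(
        tf.nn.in_top_k(predictions, tf.cast(labels, tf.int32), k), tf.float32)
    num_correct = tf.reduce_sum(in_top_k)
    return (batch_size - num_correct) / batch_size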
Code Example #11
File: train_fc.py  Project: UpCoder/MedicalImage
 def start_train(self):
     y_ = tf.placeholder(tf.float32, [None, net_config.OUTPUT_NODE])
     # avg_image = calu_average_train_set(config.TRAIN_DATA_DIR, [net_config.IMAGE_W, net_config.IMAGE_H])
     y = self.vgg.fcs_output
     global_step = tf.Variable(0, trainable=False)
     variable_averages = tf.train.ExponentialMovingAverage(
         net_config.MOVEING_AVERAGE_DECAY, global_step)
     variable_averages_op = variable_averages.apply(
         tf.trainable_variables())
     loss = calculate_loss(logits=y, labels=y_)
     tf.summary.scalar('loss', loss)
     train_step = tf.train.GradientDescentOptimizer(
         learning_rate=self.learning_rate).minimize(loss,
                                                    global_step=global_step)
     with tf.control_dependencies([train_step, variable_averages_op]):
         train_op = tf.no_op(name='train')
     # compute the accuracy
     accuracy_tensor = calculate_accuracy(logits=y, labels=y_)
     merged = tf.summary.merge_all()
     max_accuracy = 0.0
     saver = tf.train.Saver()
     with tf.Session() as sess:
         sess.run(tf.global_variables_initializer())
         log_path = './log/train'
         val_log_path = './log/val'
         if self.load_model_path:
             saver.restore(sess, self.load_model_path)
         writer = tf.summary.FileWriter(log_path, tf.get_default_graph())
         val_writer = tf.summary.FileWriter(val_log_path,
                                            tf.get_default_graph())
         for i in range(self.iterator_number):
             train_images, labels = self.dataset.get_next_train_batch(
                 net_config.TRAIN_BATCH_SIZE)
             labels = self.one_hot_encoding(labels)
             train_images = changed_shape(train_images, [
                 len(train_images), net_config.IMAGE_W, net_config.IMAGE_W,
                 1
             ])
             feed_dict = {self.vgg.imgs: train_images, y_: labels}
             _, loss_value, accuracy_value, summary, y_value, global_step_value = sess.run(
                 [train_op, loss, accuracy_tensor, merged, y, global_step],
                 feed_dict=feed_dict)
             if i % 500 == 0 and i != 0 and self.save_model_path is not None:
                 # save the model
                 print 'saving model to', self.save_model_path
                 saver.save(sess, self.save_model_path)
             writer.add_summary(summary, i)
             if (i % 40) == 0 and i != 0:
                 val_images, labels = self.dataset.get_next_val_batch(
                     net_config.TRAIN_BATCH_SIZE)
                 val_images = changed_shape(val_images, [
                     len(val_images), net_config.IMAGE_W,
                     net_config.IMAGE_W, 1
                 ])
                 labels = self.one_hot_encoding(labels)
                 feed_dict = {self.vgg.imgs: val_images, y_: labels}
                 val_loss, val_accuracy, summary = sess.run(
                     [loss, accuracy_tensor, merged], feed_dict=feed_dict)
                 val_writer.add_summary(summary, i)
                 print '-'*15, 'global_step is %d, train loss value is %g, accuracy is %g, val loss is %g, val accuracy is %g' % \
                               (global_step_value, loss_value, accuracy_value, val_loss, val_accuracy), '-'*15
             del train_images, labels
             gc.collect()
     writer.close()
     val_writer.close()
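
self.one_hot_encoding() is not shown either; for the dense [None, OUTPUT_NODE] label placeholder used above, a typical NumPy implementation would be (a sketch, assuming integer class labels starting at 0):

def one_hot_encoding(self, labels, num_classes=net_config.OUTPUT_NODE):
    # turn a vector of integer class labels into a dense one-hot matrix
    labels = np.asarray(labels, dtype=np.int32)
    encoded = np.zeros((len(labels), num_classes), dtype=np.float32)
    encoded[np.arange(len(labels)), labels] = 1.0
    return encoded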
Code Example #12
File: fine_tuning_2.py  Project: UpCoder/MedicalImage
def train(train_data_set, val_data_set, load_model_path, save_model_path):
    x = tf.placeholder(
        tf.float32,
        shape=[
            None,
            sub_Config.IMAGE_W,
            sub_Config.IMAGE_H,
            sub_Config.IMAGE_CHANNEL
        ],
        name='input_x'
    )
    y_ = tf.placeholder(
        tf.float32,
        shape=[
            None,
        ]
    )
    tf.summary.histogram(
        'label',
        y_
    )
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean('use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x, is_training=is_training,
                        # the flag means "use batch norm, otherwise use biases",
                        # so biases are enabled when the flag is off
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=(not FLAGS.use_bn),
                        num_blocks=3)
    tf.summary.histogram(
        'logits',
        tf.argmax(y, 1)
    )
    loss_ = loss(
        logits=y,
        labels=tf.cast(y_, np.int32)
    )
    tf.summary.scalar(
        'loss',
        loss_
    )
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE
    ).minimize(
        loss=loss_,
        global_step=global_step
    )
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(
                tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                tf.float32
            )
        )
        tf.summary.scalar(
            'accuracy',
            accuracy_tensor
        )
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        # sess.run(tf.global_variables_initializer())

        if load_model_path:
            # load(load_model_path, sess)
            # with tf.variable_scope('conv1_1', reuse=True):
            #     weights1 = tf.get_variable('weights')
            #     print weights1.eval(sess)
            saver.restore(sess, load_model_path)
        else:
            sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('./log/fine_tuning/train', tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val', tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.images, train_data_set.labels
            images = changed_shape(images, [
                    len(images),
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_CHANNEL
                ])
            if i == 0:
                from PIL import Image
                image = Image.fromarray(np.asarray(images[0, :, :, 0], np.uint8))
                image.show()
            # labels[labels == 1] = 0
            # labels[labels == 3] = 0
            # labels[labels == 4] = 1
            # labels[labels == 2] = 1
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss_, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                }
            )
            writer.add_summary(
                summary=summary,
                global_step=global_step_value
            )
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # save the model
                saver.save(sess, save_model_path)
            if i % 100 == 0:
                validation_images, validation_labels = val_data_set.images, val_data_set.labels
                validation_images = changed_shape(
                    validation_images,
                    [
                        len(validation_images),
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_W,
                        1
                    ]
                )
                # validation_labels[validation_labels == 1] = 0
                # validation_labels[validation_labels == 3] = 0
                # validation_labels[validation_labels == 4] = 1
                # validation_labels[validation_labels == 2] = 1
                validation_accuracy, summary, logits = sess.run(
                    [accuracy_tensor, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    }
                )
                calculate_acc_error(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                    show=True
                )
                val_writer.add_summary(summary, global_step_value)
                print 'step is %d, training loss value is %g, accuracy is %g, ' \
                      'validation accuracy is %g' % \
                      (global_step_value, loss_value, accuracy_value, validation_accuracy)
        writer.close()
        val_writer.close()