Example #1
def val(val_data_set, load_model_path, phases_names):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL * len(phases_names)
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    global_step = tf.Variable(0, trainable=False)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x,
                        is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        phase_names=phases_names,
                        num_blocks=3)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if load_model_path:
            saver.restore(sess, load_model_path)

        validation_images, validation_labels = val_data_set.get_next_batch(
            None, None)

        validation_accuracy, logits = sess.run([accuracy_tensor, y],
                                               feed_dict={
                                                   x: validation_images,
                                                   y_: validation_labels
                                               })
        calculate_acc_error(logits=np.argmax(logits, 1),
                            label=validation_labels,
                            show=True)
        binary_acc = acc_binary_acc(
            logits=np.argmax(logits, 1),
            label=validation_labels,
        )
        print 'accuracy is %g, binary_acc is %g' % \
              (validation_accuracy, binary_acc)
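The accuracy tensor built above (and reused in most of the examples below) simply compares the arg-max of the logits against the integer labels and averages the result. A minimal NumPy sketch of the same computation, handy for checking results outside the graph (the function and array names here are illustrative, not from the original code):

import numpy as np

def numpy_accuracy(logits, labels):
    # logits: [batch, num_classes] scores; labels: [batch] integer class ids
    predicted = np.argmax(logits, axis=1)
    return np.mean(predicted == np.asarray(labels, dtype=np.int64))

# e.g. numpy_accuracy(logits, validation_labels) should match validation_accuracy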
Example #2
def get_probas_OurMethod():
    from Net.forpatch.ResNetMultiPhaseMultiScale.classification_heatingmap import generate_features_labels
    train_features, train_labels, val_features, val_labels = \
        generate_features_labels(
            '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/heatingmap/crossvalidation/0')
    clf = SVC(C=0.125, gamma=8, probability=True)
    clf.fit(train_features, train_labels)
    probas_ = clf.predict_proba(val_features)
    predictedlabel = np.argmax(probas_, axis=1)
    print predictedlabel
    from Tools import calculate_acc_error
    calculate_acc_error(predictedlabel, val_labels)
    val_labels = label_binarize(val_labels, [0, 1, 2, 3])
    return probas_, val_labels
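get_probas_OurMethod returns per-class probabilities together with binarized labels, which is the usual input for per-class ROC analysis. A minimal sketch of how the returned pair could be consumed, assuming scikit-learn is available (the loop below is illustrative, not part of the original code):

from sklearn.metrics import roc_curve, auc

probas_, val_labels_bin = get_probas_OurMethod()
for class_id in range(val_labels_bin.shape[1]):
    fpr, tpr, _ = roc_curve(val_labels_bin[:, class_id], probas_[:, class_id])
    print('class %d AUC: %f' % (class_id, auc(fpr, tpr)))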
Example #3
def val(val_data_set, load_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    global_step = tf.Variable(0, trainable=False)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # sess.run(tf.global_variables_initializer())

        if load_model_path:
            # load(load_model_path, sess)
            # with tf.variable_scope('conv1_1', reuse=True):
            #     weights1 = tf.get_variable('weights')
            #     print weights1.eval(sess)
            saver.restore(sess, load_model_path)
        else:
            sess.run(tf.global_variables_initializer())

        validation_images, validation_labels = val_data_set.images, val_data_set.labels
        validation_images = changed_shape(validation_images, [
            len(validation_images), sub_Config.IMAGE_W, sub_Config.IMAGE_W, 1
        ])
        # validation_labels[validation_labels == 1] = 0
        # validation_labels[validation_labels == 3] = 0
        # validation_labels[validation_labels == 4] = 1
        # validation_labels[validation_labels == 2] = 1
        validation_accuracy, logits = sess.run([accuracy_tensor, y],
                                               feed_dict={
                                                   x: validation_images,
                                                   y_: validation_labels
                                               })
        _, _, _, error_indexs, error_record = calculate_acc_error(
            logits=np.argmax(logits, 1), label=validation_labels, show=True)
        recall, precision, f1_score = get_game_evaluate(
            np.argmax(logits, 1), validation_labels)
        validation_labels = np.array(validation_labels)
        print 'label=0 %d, label=1 %d' % (np.sum(validation_labels == 0),
                                          np.sum(validation_labels == 1))
        print 'recall is %g, precision is %g, f1_score is %g' % (
            recall, precision, f1_score)
        print 'accuracy is %g' % \
              (validation_accuracy)
        return error_indexs, error_record
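get_game_evaluate is not included in these snippets. For binary 0/1 labels, as this example's class counts suggest, an equivalent recall/precision/F1 computation can be sketched with scikit-learn (a stand-in under that assumption, not the original helper):

import numpy as np
from sklearn.metrics import precision_score, recall_score, f1_score

def game_evaluate_sketch(predicted, labels):
    # predicted, labels: 1-D arrays of binary class ids (0 or 1)
    predicted = np.asarray(predicted)
    labels = np.asarray(labels)
    recall = recall_score(labels, predicted)
    precision = precision_score(labels, predicted)
    f1 = f1_score(labels, predicted)
    return recall, precision, f1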
Example #4
def train(val_data_set, load_model_path):
    xs = []
    for index in range(len(sub_Config.SIZES)):
        xs.append(
            tf.placeholder(tf.float32,
                           shape=[
                               None, sub_Config.SIZES[index][0],
                               sub_Config.SIZES[index][1]
                           ]))
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    global_step = tf.Variable(0, trainable=False)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(xs,
                        is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)

        validation_images, validation_labels = val_data_set.images, val_data_set.labels
        feed_dict = {}
        for index, _ in enumerate(sub_Config.SIZES):
            feed_dict[xs[index]] = validation_images[index]
        feed_dict[y_] = validation_labels
        validation_accuracy, logits = sess.run([accuracy_tensor, y],
                                               feed_dict=feed_dict)
        _, _, _, error_index, error_record = calculate_acc_error(
            logits=np.argmax(logits, 1), label=validation_labels, show=True)
        print 'accuracy is %g' % \
              (validation_accuracy)
        return error_index, error_record
Example #5
def val(val_data_set, load_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    # global_step = tf.Variable(0, trainable=False)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x,
                        is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)
    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if load_model_path:
            saver.restore(sess, load_model_path)
        validation_images, validation_labels = val_data_set.images, val_data_set.labels
        validation_images = changed_shape(validation_images, [
            len(validation_images), sub_Config.IMAGE_W, sub_Config.IMAGE_W, 1
        ])
        validation_accuracy, validation_loss, logits = sess.run(
            [accuracy_tensor, loss_, y],
            feed_dict={
                x: validation_images,
                y_: validation_labels
            })
        _, _, _, error_indexs, error_record = calculate_acc_error(
            logits=np.argmax(logits, 1), label=validation_labels, show=True)
        print 'validation loss value is %g, accuracy is %g' % \
              (validation_loss, validation_accuracy)
        return error_indexs, error_record
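The loss helper called here (and in several of the later examples) is not shown in these snippets. The examples that inline their loss, such as #10 and #11, use sparse softmax cross-entropy plus the collected regularization losses, so a sketch along those lines is plausible (an assumption, not the original helper):

import tensorflow as tf

def loss(logits, labels):
    # mean sparse softmax cross-entropy over the batch,
    # plus any regularization terms collected under 'losses'
    cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    reg_losses = tf.get_collection('losses')
    if reg_losses:
        return cross_entropy + tf.add_n(reg_losses)
    return cross_entropy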
Example #6
def val(dataset, load_model_path, save_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        validation_images, validation_labels = dataset.images, dataset.labels
        validation_images = changed_shape(validation_images, [
            len(validation_images), sub_Config.IMAGE_W, sub_Config.IMAGE_W, 1
        ])
        validation_accuracy, logits = sess.run([accuracy_tensor, y],
                                               feed_dict={
                                                   x: validation_images,
                                                   y_: validation_labels
                                               })
        _, _, _, error_indexs, error_record = calculate_acc_error(
            logits=np.argmax(logits, 1), label=validation_labels, show=True)
        print 'accuracy is %g' % \
              (validation_accuracy)
        return error_indexs, error_record
Example #7
def train(logits,
          images_tensor,
          labels_tensor,
          is_training_tensor,
          save_model_path=None,
          step_width=100):
    train_dataset = DataSet(
        '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/ICIP/train',
        'train',
        rescale=True,
        divied_liver=False,
        expand_is_roi=True,
        full_roi_path=
        '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/train'
    )
    val_dataset = DataSet(
        '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/ICIP/val',
        'val',
        rescale=True,
        divied_liver=False,
        expand_is_roi=True,
        full_roi_path=
        '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/train'
    )

    train_batchdata = train_dataset.get_next_batch(net_config.BATCH_SIZE)
    val_batchdata = val_dataset.get_next_batch(net_config.BATCH_SIZE)

    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    val_step = tf.get_variable('val_step', [],
                               initializer=tf.constant_initializer(0),
                               trainable=False)
    loss_ = loss(logits, labels_tensor)
    predictions = tf.nn.softmax(logits)
    print 'predictions shape is ', predictions
    print 'label is ', labels_tensor
    top1_error = top_k_error(predictions, labels_tensor, 1)
    labels_onehot = tf.one_hot(labels_tensor, logits.get_shape().as_list()[-1])
    print 'output node is ', logits.get_shape().as_list()[-1]
    accuracy_tensor = calculate_accuracy(predictions, labels_onehot)

    # loss_avg
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    tf.add_to_collection(UPDATE_OPS_COLLECTION, ema.apply([loss_]))
    tf.summary.scalar('loss_avg', ema.average(loss_))

    # validation stats
    ema = tf.train.ExponentialMovingAverage(0.9, val_step)
    val_op = tf.group(val_step.assign_add(1), ema.apply([top1_error]))
    top1_error_avg = ema.average(top1_error)
    tf.summary.scalar('val_top1_error_avg', top1_error_avg)

    tf.summary.scalar('learning_rate', FLAGS.learning_rate)

    opt = tf.train.MomentumOptimizer(FLAGS.learning_rate, MOMENTUM)
    grads = opt.compute_gradients(loss_)
    for grad, var in grads:
        if grad is not None and not FLAGS.minimal_summaries:
            tf.summary.histogram(var.op.name + '/gradients', grad)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    if not FLAGS.minimal_summaries:
        # Display the training images in the visualizer.
        tf.summary.image('images', images_tensor)

        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

    batchnorm_updates = tf.get_collection(UPDATE_OPS_COLLECTION)
    batchnorm_updates_op = tf.group(*batchnorm_updates)
    train_op = tf.group(apply_gradient_op, batchnorm_updates_op)

    saver = tf.train.Saver(tf.all_variables())

    summary_op = tf.summary.merge_all()

    init = tf.initialize_all_variables()

    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(FLAGS.log_val_dir, sess.graph)
    if FLAGS.resume:
        latest = tf.train.latest_checkpoint(FLAGS.load_model_path)
        if not latest:
            print "No checkpoint to continue from in", FLAGS.train_dir
            sys.exit(1)
        print "resume", latest
        saver.restore(sess, latest)

    for x in xrange(FLAGS.max_steps + 1):
        start_time = time.time()

        step = sess.run(global_step)
        i = [train_op, loss_]

        write_summary = step % 100 and step > 1
        if write_summary:
            i.append(summary_op)
        train_roi_batch_images, train_labels = train_batchdata.next()
        o = sess.run(
            i,
            feed_dict={
                images_tensor: train_roi_batch_images,
                # expand_images_tensor: train_expand_roi_batch_images,
                labels_tensor: train_labels,
                is_training_tensor: True
            })

        loss_value = o[1]

        duration = time.time() - start_time

        assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

        if (step - 1) % step_width == 0:
            top1_error_value, accuracy_value, labels_values, predictions_values = sess.run(
                [top1_error, accuracy_tensor, labels_tensor, predictions],
                feed_dict={
                    images_tensor: train_roi_batch_images,
                    # expand_images_tensor: train_expand_roi_batch_images,
                    labels_tensor: train_labels,
                    is_training_tensor: True
                })
            predictions_values = np.argmax(predictions_values, axis=1)
            examples_per_sec = FLAGS.batch_size / float(duration)
            # accuracy = eval_accuracy(predictions_values, labels_values)
            format_str = (
                'step %d, loss = %.2f, top1 error = %g, accuracy value = %g  (%.1f examples/sec; %.3f '
                'sec/batch)')

            print(format_str % (step, loss_value, top1_error_value,
                                accuracy_value, examples_per_sec, duration))
        if write_summary:
            summary_str = o[2]
            summary_writer.add_summary(summary_str, step)

        # Save the model checkpoint periodically.
        if step > 1 and step % step_width == 0:

            checkpoint_path = os.path.join(save_model_path, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=global_step)
            save_dir = os.path.join(save_model_path, str(step))
            if not os.path.exists(save_dir):
                os.mkdir(save_dir)
            filenames = glob(
                os.path.join(save_model_path,
                             '*-' + str(int(step + 1)) + '.*'))
            for filename in filenames:
                shutil.copy(filename,
                            os.path.join(save_dir, os.path.basename(filename)))
        # Run validation periodically
        if step > 1 and step % step_width == 0:
            val_roi_batch_images, val_labels = val_batchdata.next()
            _, top1_error_value, summary_value, accuracy_value, labels_values, predictions_values = sess.run(
                [
                    val_op, top1_error, summary_op, accuracy_tensor,
                    labels_tensor, predictions
                ],
                {
                    images_tensor: val_roi_batch_images,
                    # expand_images_tensor: val_expand_roi_batch_images,
                    labels_tensor: val_labels,
                    is_training_tensor: False
                })
            predictions_values = np.argmax(predictions_values, axis=1)
            # accuracy = eval_accuracy(predictions_values, labels_values)
            calculate_acc_error(logits=predictions_values,
                                label=labels_values,
                                show=True)
            print('Validation top1 error %.2f, accuracy value %f' %
                  (top1_error_value, accuracy_value))
            val_summary_writer.add_summary(summary_value, step)
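top_k_error is referenced above but not defined in this snippet. A minimal TF 1.x sketch of such a helper, assuming predictions are softmax outputs of shape [batch, num_classes] and labels are integer class ids; the body follows the common ResNet training-script pattern and is an assumption, not the original code:

import tensorflow as tf

def top_k_error(predictions, labels, k):
    # fraction of examples whose true class is not among the top-k predictions
    in_top_k = tf.cast(tf.nn.in_top_k(predictions, tf.cast(labels, tf.int32), k=k),
                       tf.float32)
    return 1.0 - tf.reduce_mean(in_top_k)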
Example #8
def main(_):
    roi_images = tf.placeholder(shape=[
        None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                dtype=np.float32,
                                name='roi_input')
    expand_roi_images = tf.placeholder(shape=[
        None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                       dtype=np.float32,
                                       name='expand_roi_input')
    batch_size_tensor = tf.placeholder(dtype=tf.int32, shape=[])
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    logits = inference_small(roi_images,
                             expand_roi_images,
                             phase_names=['NC', 'ART', 'PV'],
                             num_classes=5,
                             point_phase=[2],
                             is_training=is_training_tensor,
                             batch_size=batch_size_tensor)
    model_path = '/home/give/PycharmProjects/MedicalImage/Net/ICIP/Patch_ROI/models/500.0'
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/cross_validation/model/multiscale/parallel/0/2200.0'
    predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver(tf.all_variables())
    print predictions

    predicted_label_tensor = tf.argmax(predictions, axis=1)
    print predicted_label_tensor
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    latest = tf.train.latest_checkpoint(model_path)
    if not latest:
        print "No checkpoint to continue from in", model_path
        sys.exit(1)
    print "resume", latest
    saver.restore(sess, latest)

    data_dir = '/home/give/Documents/dataset/MedicalImage/MedicalImage/Patches/ICIP/only-patch/test'
    labels = []
    paths = []
    for typeid in [0, 1, 2, 3, 4]:
        cur_path = os.path.join(data_dir, str(typeid))
        names = os.listdir(cur_path)
        labels.extend([typeid] * len(names))
        paths.extend([os.path.join(cur_path, name) for name in names])
    paths, labels = shuffle_image_label(paths, labels)
    start_index = 0
    predicted_labels = []
    liver_density = load_raw_liver_density()
    while True:
        if start_index >= len(paths):
            break
        print start_index, len(paths)
        end_index = start_index + net_config.BATCH_SIZE
        cur_paths = paths[start_index:end_index]
        cur_roi_images = [np.asarray(load_patch(path)) for path in cur_paths]
        cur_expand_roi_images = [
            np.asarray(
                load_patch(
                    path,
                    return_roi=True,
                    parent_dir=
                    '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/val'
                )) for path in cur_paths
        ]
        cur_roi_images = resize_images(cur_roi_images, net_config.ROI_SIZE_W,
                                       True)
        cur_expand_roi_images = resize_images(cur_expand_roi_images,
                                              net_config.EXPAND_SIZE_W, True)
        cur_liver_densitys = [
            liver_density[os.path.basename(path)
                          [:os.path.basename(path).rfind('_')]]
            for path in cur_paths
        ]
        # for i in range(len(cur_roi_images)):
        #     for j in range(3):
        #         cur_roi_images[i, :, :, j] = (1.0 * cur_roi_images[i, :, :, j]) / (1.0 * cur_liver_densitys[i][j])
        #         cur_expand_roi_images[i, :, :, j] = (1.0 * cur_expand_roi_images[i, :, :, j]) / (
        #         1.0 * cur_liver_densitys[i][j])
        predicted_batch_labels = sess.run(predicted_label_tensor,
                                          feed_dict={
                                              roi_images:
                                              cur_roi_images,
                                              expand_roi_images:
                                              cur_expand_roi_images,
                                              is_training_tensor:
                                              False,
                                              batch_size_tensor:
                                              len(cur_roi_images)
                                          })
        batch_labels = labels[start_index:end_index]
        predicted_labels.extend(predicted_batch_labels)
        start_index = end_index
        calculate_acc_error(predicted_batch_labels, batch_labels)
    calculate_acc_error(predicted_labels, labels)
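shuffle_image_label is another helper that is not included here; its role is just to shuffle the path list and the label list with the same permutation. A minimal hypothetical stand-in, not the original implementation:

import random

def shuffle_image_label(paths, labels):
    # shuffle two parallel lists with one shared permutation
    indices = list(range(len(paths)))
    random.shuffle(indices)
    return [paths[i] for i in indices], [labels[i] for i in indices]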
Example #9
def train(train_data_set, val_data_set, load_model_path, save_model_path):
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    # regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x,
                        is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(
            loss=loss_, global_step=global_step)
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        writer = tf.summary.FileWriter('./log/fine_tuning/train',
                                       tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val',
                                           tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.get_next_batch(
                sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
            images = changed_shape(images, [
                len(images), sub_Config.IMAGE_W, sub_Config.IMAGE_W,
                sub_Config.IMAGE_CHANNEL
            ])
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss_, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                })
            writer.add_summary(summary=summary, global_step=global_step_value)
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # Save the model
                import os
                saveedpath = os.path.join(save_model_path,
                                          str(global_step_value))
                if not os.path.exists(saveedpath):
                    os.mkdir(saveedpath)
                saveedpath += '/model.ckpt'
                saver.save(sess, saveedpath, global_step=global_step_value)
            if i % 100 == 0:
                validation_images, validation_labels = val_data_set.get_next_batch(
                    sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
                validation_images = changed_shape(validation_images, [
                    len(validation_images), sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W, 1
                ])
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss_, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    })
                calculate_acc_error(logits=np.argmax(logits, 1),
                                    label=validation_labels,
                                    show=True)
                binary_acc = acc_binary_acc(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                )
                val_writer.add_summary(summary, global_step_value)
                print 'step is %d,training loss value is %g,  accuracy is %g ' \
                      'validation loss value is %g, accuracy is %g, binary_acc is %g' % \
                      (global_step_value, loss_value, accuracy_value, validation_loss, validation_accuracy, binary_acc)
        writer.close()
        val_writer.close()
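changed_shape, used throughout these examples, only needs to bring a batch of images into the 4-D [batch, height, width, channels] layout the input placeholder expects. A minimal NumPy stand-in, assuming the input already holds the matching number of elements (hypothetical, not the original helper):

import numpy as np

def changed_shape(images, new_shape):
    # reshape the batch into the [N, H, W, C] layout expected by the input placeholder
    return np.reshape(np.asarray(images, dtype=np.float32), new_shape)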
Example #10
def train(dataset, load_model_path, save_model_path, train_log_dir, val_log_dir):
    x = tf.placeholder(
        tf.float32,
        shape=[
            None,
            sub_Config.IMAGE_W,
            sub_Config.IMAGE_H,
            sub_Config.IMAGE_CHANNEL
        ],
        name='input_x'
    )
    y_ = tf.placeholder(
        tf.float32,
        shape=[
            None,
        ]
    )
    tf.summary.histogram(
        'label',
        y_
    )
    # global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # variable_averages_op = variable_average.apply(tf.trainable_variables())
    regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)
    tf.summary.histogram(
        'logits',
        tf.argmax(y, 1)
    )
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y,
            labels=tf.cast(y_, tf.int32)
        )
    ) + tf.add_n(tf.get_collection('losses'))
    tf.summary.scalar(
        'loss',
        loss
    )
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE
    ).minimize(
        loss=loss,
        # global_step=global_step
    )
    # with tf.control_dependencies([train_step, variable_averages_op]):
    #     train_op = tf.no_op(name='train')
    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(
                tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                tf.float32
            )
        )
        tf.summary.scalar(
            'accuracy',
            accuracy_tensor
        )
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        writer = tf.summary.FileWriter(train_log_dir, tf.get_default_graph())
        val_writer = tf.summary.FileWriter(val_log_dir, tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = dataset.get_next_train_batch(sub_Config.BATCH_SIZE)
            images = changed_shape(images, [
                    len(images),
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_CHANNEL
                ])
            if i == 0:
                from PIL import Image
                image = Image.fromarray(np.asarray(images[0, :, :, 0], np.uint8))
                image.show()
            _, loss_value, accuracy_value, summary = sess.run(
                [train_op, loss, accuracy_tensor, merge_op],
                feed_dict={
                    x: images,
                    y_: labels
                }
            )
            writer.add_summary(
                summary=summary,
                global_step=i
            )
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # Save the model into a per-step directory, keeping the base path unchanged
                import os
                step_model_path = save_model_path + '_' + str(i)
                if not os.path.exists(step_model_path):
                    os.mkdir(step_model_path)
                saver.save(sess, step_model_path + '/')
            if i % 100 == 0:
                validation_images, validation_labels = dataset.get_next_val_batch(sub_Config.BATCH_SIZE)
                validation_images = changed_shape(
                    validation_images,
                    [
                        len(validation_images),
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_CHANNEL
                    ]
                )
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    }
                )
                calculate_acc_error(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                    show=True
                )
                val_writer.add_summary(summary, i)
                print 'step is %d,training loss value is %g,  accuracy is %g ' \
                      'validation loss value is %g, accuracy is %g' % \
                      (i, loss_value, accuracy_value, validation_loss, validation_accuracy)
        writer.close()
        val_writer.close()
Example #11
def train(dataset, load_model=False):
    x1 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[0][0],
                            sub_Config.sizes[0][1], sub_Config.sizes[0][2]
                        ],
                        name='input_x1')
    x2 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[1][0],
                            sub_Config.sizes[1][1], sub_Config.sizes[1][2]
                        ],
                        name='input_x2')
    x3 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[2][0],
                            sub_Config.sizes[2][1], sub_Config.sizes[2][2]
                        ],
                        name='input_x3')

    bg1 = tf.placeholder(tf.float32,
                         shape=[
                             sub_Config.BATCH_SIZE, sub_Config.bg_sizes[0][0],
                             sub_Config.bg_sizes[0][1],
                             sub_Config.bg_sizes[0][2]
                         ],
                         name='input_bg1')
    bg2 = tf.placeholder(tf.float32,
                         shape=[
                             sub_Config.BATCH_SIZE, sub_Config.bg_sizes[1][0],
                             sub_Config.bg_sizes[1][1],
                             sub_Config.bg_sizes[1][2]
                         ],
                         name='input_bg2')
    bg3 = tf.placeholder(tf.float32,
                         shape=[
                             sub_Config.BATCH_SIZE, sub_Config.bg_sizes[2][0],
                             sub_Config.bg_sizes[2][1],
                             sub_Config.bg_sizes[2][2]
                         ],
                         name='input_bg3')
    tf.summary.image('input_x1', x1, max_outputs=5)
    tf.summary.image('input_x2', x2, max_outputs=5)
    tf.summary.image('input_x3', x3, max_outputs=5)
    tf.summary.image('input_bg1', bg1, max_outputs=5)
    tf.summary.image('input_bg2', bg2, max_outputs=5)
    tf.summary.image('input_bg3', bg3, max_outputs=5)
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference([x1, x2, x3], [bg1, bg2, bg3], regularizer)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.cast(y_, tf.int32))) + tf.add_n(
                tf.get_collection('losses'))
    tf.summary.scalar('loss', loss)
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(loss=loss)
    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        if load_model:
            saver.restore(sess, sub_Config.MODEL_SAVE_PATH)
        writer = tf.summary.FileWriter(sub_Config.TRAIN_LOG_DIR,
                                       tf.get_default_graph())
        val_writer = tf.summary.FileWriter(sub_Config.VAL_LOG_DIR,
                                           tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            # images, labels = dataset.train.next_batch(sub_Config.BATCH_SIZE)
            # labels = np.argmax(labels, 1)
            # # print np.shape(labels)
            # images = np.reshape(
            #     images,
            #     [
            #         sub_Config.BATCH_SIZE,
            #         sub_Config.IMAGE_W,
            #         sub_Config.IMAGE_H,
            #         sub_Config.IMAGE_CHANNEL
            #     ]
            # )
            images, labels, bgs = dataset.next_train_batch(
                sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
            images1 = images[:, 0, :]
            images2 = images[:, 1, :]
            images3 = images[:, 2, :]
            images1 = changed_shape(images1, [
                sub_Config.BATCH_SIZE, sub_Config.sizes[0][0],
                sub_Config.sizes[0][1], sub_Config.sizes[0][2]
            ])
            images2 = changed_shape(images2, [
                sub_Config.BATCH_SIZE, sub_Config.sizes[1][0],
                sub_Config.sizes[1][1], sub_Config.sizes[1][2]
            ])
            images3 = changed_shape(images3, [
                sub_Config.BATCH_SIZE, sub_Config.sizes[2][0],
                sub_Config.sizes[2][1], sub_Config.sizes[2][2]
            ])

            input_bg1 = bgs[:, 0, :]
            input_bg2 = bgs[:, 1, :]
            input_bg3 = bgs[:, 2, :]
            input_bg1 = changed_shape(input_bg1, [
                sub_Config.BATCH_SIZE, sub_Config.bg_sizes[0][0],
                sub_Config.bg_sizes[0][1], sub_Config.bg_sizes[0][2]
            ])
            input_bg2 = changed_shape(input_bg2, [
                sub_Config.BATCH_SIZE, sub_Config.bg_sizes[1][0],
                sub_Config.bg_sizes[1][1], sub_Config.bg_sizes[1][2]
            ])
            input_bg3 = changed_shape(input_bg3, [
                sub_Config.BATCH_SIZE, sub_Config.bg_sizes[2][0],
                sub_Config.bg_sizes[2][1], sub_Config.bg_sizes[2][2]
            ])
            if i == 0:
                from PIL import Image
                image = Image.fromarray(
                    np.asarray(images3[0, :, :, 0], np.uint8))
                image.show()
            # images = np.reshape(
            #     images[:, :, :, 2],
            #     [
            #         sub_Config.BATCH_SIZE,
            #         sub_Config.IMAGE_W,
            #         sub_Config.IMAGE_W,
            #         1
            #     ]
            # )
            _, loss_value, accuracy_value, summary = sess.run(
                [train_op, loss, accuracy_tensor, merge_op],
                feed_dict={
                    x1: images1,
                    x2: images2,
                    x3: images3,
                    bg1: input_bg1,
                    bg2: input_bg2,
                    bg3: input_bg3,
                    y_: labels
                })
            writer.add_summary(summary=summary, global_step=i)
            if i % 1000 == 0 and i != 0:
                # Save the model
                saver.save(sess, sub_Config.MODEL_SAVE_PATH)
            if i % 100 == 0:
                validation_images, validation_labels, bgs = dataset.next_val_batch(
                    sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
                images1 = validation_images[:, 0, :]
                images2 = validation_images[:, 1, :]
                images3 = validation_images[:, 2, :]
                images1 = changed_shape(images1, [
                    len(validation_images), sub_Config.sizes[0][0],
                    sub_Config.sizes[0][1], sub_Config.sizes[0][2]
                ])
                images2 = changed_shape(images2, [
                    len(validation_images), sub_Config.sizes[1][0],
                    sub_Config.sizes[1][1], sub_Config.sizes[1][2]
                ])
                images3 = changed_shape(images3, [
                    len(validation_images), sub_Config.sizes[2][0],
                    sub_Config.sizes[2][1], sub_Config.sizes[2][2]
                ])
                input_bg1 = bgs[:, 0, :]
                input_bg2 = bgs[:, 1, :]
                input_bg3 = bgs[:, 2, :]
                input_bg1 = changed_shape(input_bg1, [
                    sub_Config.BATCH_SIZE, sub_Config.bg_sizes[0][0],
                    sub_Config.bg_sizes[0][1], sub_Config.bg_sizes[0][2]
                ])
                input_bg2 = changed_shape(input_bg2, [
                    sub_Config.BATCH_SIZE, sub_Config.bg_sizes[1][0],
                    sub_Config.bg_sizes[1][1], sub_Config.bg_sizes[1][2]
                ])
                input_bg3 = changed_shape(input_bg3, [
                    sub_Config.BATCH_SIZE, sub_Config.bg_sizes[2][0],
                    sub_Config.bg_sizes[2][1], sub_Config.bg_sizes[2][2]
                ])
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss, merge_op, y],
                    feed_dict={
                        x1: images1,
                        x2: images2,
                        x3: images3,
                        bg1: input_bg1,
                        bg2: input_bg2,
                        bg3: input_bg3,
                        y_: validation_labels
                    })
                calculate_acc_error(logits=np.argmax(logits, 1),
                                    label=validation_labels,
                                    show=True)
                print 'step is %d,training loss value is %g,  accuracy is %g ' \
                      'validation loss value is %g, accuracy is %g' % \
                      (i, loss_value, accuracy_value, validation_loss, validation_accuracy)
                val_writer.add_summary(summary, i)
        writer.close()
        val_writer.close()
Example #12
def val(val_data_set, load_model_path, phases_names):

    x_ROI = tf.placeholder(tf.float32,
                           shape=[
                               None, net_config.ROI_SIZE_W,
                               net_config.ROI_SIZE_H,
                               net_config.IMAGE_CHANNEL * len(phases_names)
                           ],
                           name='input_x')

    x_EXPAND = tf.placeholder(tf.float32,
                              shape=[
                                  None, net_config.EXPAND_SIZE_W,
                                  net_config.EXPAND_SIZE_H,
                                  net_config.IMAGE_CHANNEL * len(phases_names)
                              ])
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    # regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small([x_ROI, x_EXPAND],
                        is_training=is_training,
                        num_classes=net_config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        phase_names=phases_names,
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if load_model_path:
            saver.restore(sess, load_model_path)

        validation_images, validation_images_expand, validation_labels = val_data_set.get_next_batch(
        )

        validation_accuracy, validation_loss, summary, logits = sess.run(
            [accuracy_tensor, loss_, merge_op, y],
            feed_dict={
                x_ROI: validation_images,
                x_EXPAND: validation_images_expand,
                y_: validation_labels
            })
        calculate_acc_error(logits=np.argmax(logits, 1),
                            label=validation_labels,
                            show=True)
        binary_acc = acc_binary_acc(
            logits=np.argmax(logits, 1),
            label=validation_labels,
        )

        print 'validation loss value is %g, accuracy is %g, binary_acc is %g' % \
              (validation_loss, validation_accuracy, binary_acc)
Example #13
def train(dataset):
    x1 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[0][0],
                            sub_Config.sizes[0][1], sub_Config.sizes[0][2]
                        ],
                        name='input_x1')
    x2 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[1][0],
                            sub_Config.sizes[1][1], sub_Config.sizes[1][2]
                        ],
                        name='input_x2')
    x3 = tf.placeholder(tf.float32,
                        shape=[
                            sub_Config.BATCH_SIZE, sub_Config.sizes[2][0],
                            sub_Config.sizes[2][1], sub_Config.sizes[2][2]
                        ],
                        name='input_x3')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    regularizer = tf.contrib.layers.l2_regularizer(
        sub_Config.REGULARIZTION_RATE)
    y = inference_parllel([x1, x2, x3], regularizer)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.cast(y_, tf.int32))) + tf.add_n(
                tf.get_collection('losses'))
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(loss=loss)
    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saver.restore(sess, sub_Config.MODEL_SAVE_PATH)  # load the trained model
        validation_images, validation_labels = dataset.get_validation_images_labels(
        )
        images1 = validation_images[:, 0, :]
        images2 = validation_images[:, 1, :]
        images3 = validation_images[:, 2, :]
        images1 = changed_shape(images1, [
            len(validation_images), sub_Config.sizes[0][0],
            sub_Config.sizes[0][1], sub_Config.sizes[0][2]
        ])
        images2 = changed_shape(images2, [
            len(validation_images), sub_Config.sizes[1][0],
            sub_Config.sizes[1][1], sub_Config.sizes[1][2]
        ])
        images3 = changed_shape(images3, [
            len(validation_images), sub_Config.sizes[2][0],
            sub_Config.sizes[2][1], sub_Config.sizes[2][2]
        ])
        validation_accuracy, validation_loss, summary, logits = sess.run(
            [accuracy_tensor, loss, merge_op, y],
            feed_dict={
                x1: images1,
                x2: images2,
                x3: images3,
                y_: validation_labels
            })
        calculate_acc_error(logits=np.argmax(logits, 1),
                            label=validation_labels,
                            show=True)
Example #14
                {is_training: False})
            acc_sum += top1_error_value
            logits_value.extend(batch_logits_value)
            labels_value.extend(batch_labels_value)
            if not is_testing:
                print('Validation top%d error %.2f, count is %d' %
                      (k, top1_error_value, count))
        except Exception, _:
            break
    if not is_testing:
        print 'The top%d error on the total validation set: %f' % (k, acc_sum /
                                                                   count)
        print labels_value
        print np.shape(logits_value)
        error_dict, error_dict_record, error_rate, error_index, error_record = calculate_acc_error(
            logits=np.argmax(logits_value, axis=1),
            label=labels_value,
            show=True)
        print('accuracy value %f' % (1 - error_rate))
        if roi_paths is not None:
            print 'Error files are : '

            for index in error_index:
                # print os.path.dirname(roi_paths[index]), '->', os.path.join('/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/ROI/delete_val_0', os.path.basename(os.path.dirname(roi_paths[index])))
                # shutil.move(
                #     os.path.dirname(roi_paths[index]),
                #     os.path.join('/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/ROI/delete_val_0', os.path.basename(os.path.dirname(roi_paths[index])))
                # )
                print roi_paths[index], np.argmax(logits_value[index])
    if is_testing:
        prediction_flags = np.argmax(logits_value, axis=1)
Example #15
def train(train_data_set, val_data_set, load_model_path, save_model_path):
    x = tf.placeholder(
        tf.float32,
        shape=[
            None,
            sub_Config.IMAGE_W,
            sub_Config.IMAGE_H,
            sub_Config.IMAGE_CHANNEL
        ],
        name='input_x'
    )
    y_ = tf.placeholder(
        tf.float32,
        shape=[
            None,
        ]
    )
    tf.summary.histogram(
        'label',
        y_
    )
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean('use_bn', True, 'use batch normalization. otherwise use biases')
    y = inference_small(x, is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        num_blocks=3)
    tf.summary.histogram(
        'logits',
        tf.argmax(y, 1)
    )
    loss_ = loss(
        logits=y,
        labels=tf.cast(y_, np.int32)
    )
    tf.summary.scalar(
        'loss',
        loss_
    )
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE
    ).minimize(
        loss=loss_,
        global_step=global_step
    )
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(
                tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                tf.float32
            )
        )
        tf.summary.scalar(
            'accuracy',
            accuracy_tensor
        )
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        # sess.run(tf.global_variables_initializer())

        if load_model_path:
            # load(load_model_path, sess)
            # with tf.variable_scope('conv1_1', reuse=True):
            #     weights1 = tf.get_variable('weights')
            #     print weights1.eval(sess)
            saver.restore(sess, load_model_path)
        else:
            sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('./log/fine_tuning/train', tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val', tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.images, train_data_set.labels
            images = changed_shape(images, [
                    len(images),
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_CHANNEL
                ])
            if i == 0:
                from PIL import Image
                image = Image.fromarray(np.asarray(images[0, :, :, 0], np.uint8))
                image.show()
            # labels[labels == 1] = 0
            # labels[labels == 3] = 0
            # labels[labels == 4] = 1
            # labels[labels == 2] = 1
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss_, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                }
            )
            writer.add_summary(
                summary=summary,
                global_step=global_step_value
            )
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # Save the model
                saver.save(sess, save_model_path)
            if i % 100 == 0:
                validation_images, validation_labels = val_data_set.images, val_data_set.labels
                validation_images = changed_shape(
                    validation_images,
                    [
                        len(validation_images),
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_W,
                        1
                    ]
                )
                # validation_labels[validation_labels == 1] = 0
                # validation_labels[validation_labels == 3] = 0
                # validation_labels[validation_labels == 4] = 1
                # validation_labels[validation_labels == 2] = 1
                validation_accuracy, summary, logits = sess.run(
                    [accuracy_tensor, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    }
                )
                calculate_acc_error(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                    show=True
                )
                val_writer.add_summary(summary, global_step_value)
                print 'step is %d, training loss value is %g, accuracy is %g, ' \
                      'validation accuracy is %g' % \
                      (global_step_value, loss_value, accuracy_value, validation_accuracy)
        writer.close()
        val_writer.close()
                train_labels.extend([type] * len(cur_features))
            else:
                val_features.extend(cur_features)
                val_labels.extend([type] * len(cur_features))
    scio.savemat(
        'data.mat', {
            'train_features': train_features,
            'train_labels': train_labels,
            'val_features': val_features,
            'val_labels': val_labels
        })
    return train_features, train_labels, val_features, val_labels


if __name__ == '__main__':
    train_features, train_labels, val_features, val_labels = \
        generate_features_labels('/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/heatingmap/ICIP/3-classes/only_patch')
    print np.shape(train_features), np.shape(train_labels), np.shape(
        val_features), np.shape(val_labels)
    from BoVW.classification import SVM, LinearSVM, KNN
    # predicted_label = SVM.do(train_features, train_labels, val_features, val_labels, adjust_parameters=True)
    predicted_label, c_params, g_params, accs = SVM.do(train_features,
                                                       train_labels,
                                                       val_features,
                                                       val_labels,
                                                       adjust_parameters=True)
    print predicted_label
    calculate_acc_error(predicted_label, val_labels)

    # from tools.plot3D import draw_3d
    # draw_3d(c_params, g_params, accs, 'log c', 'log g', '%')
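    # The adjust_parameters=True call above returns c_params / g_params grids and
    # per-combination accuracies, which suggests SVM.do runs an RBF-SVM grid
    # search over log-spaced C and gamma. A minimal sketch of such a search with
    # scikit-learn (the grids and cv=3 are assumptions for illustration, not the
    # project's SVM.do implementation):
    #
    #     from sklearn.svm import SVC
    #     from sklearn.model_selection import GridSearchCV
    #     param_grid = {'C': 2.0 ** np.arange(-3, 4),
    #                   'gamma': 2.0 ** np.arange(-3, 4)}
    #     search = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=3)
    #     search.fit(train_features, train_labels)
    #     predicted_label = search.predict(val_features)
    #     calculate_acc_error(predicted_label, val_labels)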
示例#17
0
def train(train_data_set, val_data_set, load_model_path, save_model_path):
    x = tf.placeholder(
        tf.float32,
        shape=[
            None,
            sub_Config.IMAGE_W,
            sub_Config.IMAGE_H,
            sub_Config.IMAGE_CHANNEL
        ],
        name='input_x'
    )
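    # log a few input patches to TensorBoard; NEED_MUL presumably rescales
    # normalized inputs into a range that is visible in the image summary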
    if sub_Config.NEED_MUL:
        tf.summary.image(
            'input_x',
            x * 120,
            max_outputs=5
        )
    else:
        tf.summary.image(
            'input_x',
            x
        )
    y_ = tf.placeholder(
        tf.float32,
        shape=[
            None,
        ]
    )
    tf.summary.histogram(
        'label',
        y_
    )
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    y = inference(x, regularizer)
    tf.summary.histogram(
        'logits',
        tf.argmax(y, 1)
    )
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y,
            labels=tf.cast(y_, tf.int32)
        )
    ) + tf.add_n(tf.get_collection('losses'))
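    # tf.add_n over the 'losses' collection presumably sums the L2 penalties
    # that inference() registers via the regularizer created above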
    tf.summary.scalar(
        'loss',
        loss
    )
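    # plain gradient-descent step; minimize() also increments global_step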
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE
    ).minimize(
        loss=loss,
        global_step=global_step
    )
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        accuracy_tensor = tf.reduce_mean(
            tf.cast(
                tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                tf.float32
            )
        )
        tf.summary.scalar(
            'accuracy',
            accuracy_tensor
        )
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        # sess.run(tf.global_variables_initializer())

        if load_model_path:
            # load(load_model_path, sess)
            # with tf.variable_scope('conv1_1', reuse=True):
            #     weights1 = tf.get_variable('weights')
            #     print weights1.eval(sess)
            saver.restore(sess, load_model_path)
        else:
            sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('./log/fine_tuning/train', tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val', tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.images, train_data_set.labels
            images = changed_shape(images, [
                    len(images),
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_CHANNEL
                ])
            if i == 0:
                from PIL import Image
                image = Image.fromarray(np.asarray(images[0, :, :, 0], np.uint8))
                image.show()
            # labels[labels == 1] = 0
            # labels[labels == 3] = 0
            # labels[labels == 4] = 1
            # labels[labels == 2] = 1
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                }
            )
            writer.add_summary(
                summary=summary,
                global_step=global_step_value
            )
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # save the model: export the named layer weights to .npy and write a checkpoint
                save_weights(save_model_path+'model_weights.npy', [
                    'conv1_1',
                    'conv2_1',
                    'conv3_1',
                    'fc1',
                    'fc2'
                ])
                saver.save(sess, save_model_path)
            if i % 100 == 0:
                validation_images, validation_labels = val_data_set.images, val_data_set.labels
                validation_images = changed_shape(
                    validation_images,
                    [
                        len(validation_images),
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_W,
                        1
                    ]
                )
                # validation_labels[validation_labels == 1] = 0
                # validation_labels[validation_labels == 3] = 0
                # validation_labels[validation_labels == 4] = 1
                # validation_labels[validation_labels == 2] = 1
                validation_accuracy, summary, logits = sess.run(
                    [accuracy_tensor, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    }
                )
                calculate_acc_error(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                    show=True
                )
                val_writer.add_summary(summary, global_step_value)
                print 'step is %d, training loss is %g, training accuracy is %g, ' \
                      'validation accuracy is %g' % \
                      (global_step_value, loss_value, accuracy_value, validation_accuracy)
        writer.close()
        val_writer.close()
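

# Hypothetical driver for the train() function above: the SimpleDataSet wrapper,
# the dummy arrays and the paths are illustrative assumptions, not the project's
# real data pipeline, which is not shown in this example.
class SimpleDataSet(object):
    def __init__(self, images, labels):
        self.images = images  # expected shape: [N, IMAGE_W, IMAGE_W, IMAGE_CHANNEL]
        self.labels = labels  # expected shape: [N], integer class ids


if __name__ == '__main__':
    import numpy as np
    # zero-filled placeholder patches, just enough to exercise the graph
    dummy_images = np.zeros(
        (8, sub_Config.IMAGE_W, sub_Config.IMAGE_W, sub_Config.IMAGE_CHANNEL),
        dtype=np.float32)
    dummy_labels = np.zeros((8,), dtype=np.int32)
    train(
        SimpleDataSet(dummy_images, dummy_labels),
        SimpleDataSet(dummy_images, dummy_labels),
        load_model_path=None,                        # train from scratch
        save_model_path='./log/fine_tuning/model.ckpt'
    )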