Пример #1
0
def main(_):
    """Build the multi-phase training graph and run training.

    Reads batched (image, expanded-image, label) tuples for both the
    training and validation splits, switches between them at run time with
    an `is_training` placeholder, builds the small multi-phase ResNet and
    hands everything to `train`.
    """
    [train_images, train_expand_images,
     train_labels], [val_images, val_expand_images, val_labels
                     ] = distorted_inputs(target_labels=[0, 1, 2, 3, 4],
                                          true_labels=[0, 1, 2, 3, 4])
    is_training = tf.placeholder('bool', [], name='is_training')
    # Select the training or validation batch depending on the placeholder.
    images, expand_images, labels = tf.cond(
        is_training, lambda: (train_images, train_expand_images, train_labels),
        lambda: (val_images, val_expand_images, val_labels))
    logits = inference_small(
        images,
        expand_images,
        phase_names=['NC', 'ART', 'PV'],
        num_classes=5,
        # Fix: pass the placeholder instead of the constant True so that
        # training-only behaviour (e.g. batch norm) also switches off when
        # the validation input branch is selected.
        is_training=is_training,
    )
    save_model_path = '/home/give/PycharmProjects/MedicalImage/Net/BaseNet/ResNetMultiPhaseExpand/models'
    train(is_training,
          logits,
          images,
          expand_images,
          labels,
          save_model_path=save_model_path,
          step_width=50)
Пример #2
0
def main(_):
    """Build placeholders for the ROI patch network and start training."""
    roi_shape = [
        net_config.BATCH_SIZE, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
        net_config.IMAGE_CHANNEL
    ]
    expand_shape = [
        net_config.BATCH_SIZE, net_config.EXPAND_SIZE_W,
        net_config.EXPAND_SIZE_H, net_config.IMAGE_CHANNEL
    ]
    roi_images = tf.placeholder(shape=roi_shape,
                                dtype=np.float32,
                                name='roi_input')
    expand_roi_images = tf.placeholder(shape=expand_shape,
                                       dtype=np.float32,
                                       name='expand_roi_input')
    labels_tensor = tf.placeholder(shape=[None], dtype=np.int32)
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    # Three-phase (NC/ART/PV) small ResNet with phase index 2 as the
    # pointed phase.
    logits = inference_small(roi_images,
                             expand_roi_images,
                             phase_names=['NC', 'ART', 'PV'],
                             num_classes=4,
                             is_training=is_training_tensor,
                             point_phase=[2])
    save_model_path = '/home/give/PycharmProjects/ICPR2018/DeepLearning/Patch_ROI/models'
    train(logits,
          roi_images,
          expand_roi_images,
          labels_tensor,
          is_training_tensor,
          save_model_path=save_model_path,
          step_width=20,
          record_loss=False)
Пример #3
0
def main(_):
    """Build the co-occurrence network graph and start training."""
    roi_shape = [
        net_config.BATCH_SIZE, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
        net_config.IMAGE_CHANNEL
    ]
    expand_shape = [
        net_config.BATCH_SIZE, net_config.EXPAND_SIZE_W,
        net_config.EXPAND_SIZE_H, net_config.IMAGE_CHANNEL
    ]
    roi_images = tf.placeholder(shape=roi_shape,
                                dtype=np.float32,
                                name='roi_input')
    expand_roi_images = tf.placeholder(shape=expand_shape,
                                       dtype=np.float32,
                                       name='expand_roi_input')
    labels_tensor = tf.placeholder(shape=[None], dtype=np.int32)
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    # The network returns the final logits plus the local/global branch
    # outputs and the shared representation feature tensor.
    outputs = inference_small(roi_images,
                              expand_roi_images,
                              phase_names=['NC', 'ART', 'PV'],
                              num_classes=4,
                              is_training=is_training_tensor)
    logits, local_output_tensor, global_output_tensor, represent_feature_tensor = outputs
    save_model_path = '/home/give/PycharmProjects/MICCAI2018/deeplearning/Co-Occurrence/parameters/1'
    train(logits,
          local_output_tensor,
          global_output_tensor,
          represent_feature_tensor,
          roi_images,
          expand_roi_images,
          labels_tensor,
          is_training_tensor,
          save_model_path=save_model_path,
          step_width=100)
Пример #4
0
def evaluate():
    """Eval CIFAR-10 for a number of steps.

    Builds an inference-only graph, then repeatedly calls `eval_once`,
    sleeping FLAGS.eval_interval_secs between rounds unless FLAGS.run_once
    is set.
    """
    with tf.Graph().as_default() as g:
        # Pick the evaluation split for CIFAR-10.
        use_test_split = FLAGS.eval_data == 'test'
        images, labels = cifar10_input.inputs(use_test_split, FLAGS.data_dir,
                                              FLAGS.batch_size)

        # Inference-mode logits from the small ResNet.
        logits = inference_small(images, is_training=False, num_blocks=9)

        # True where the true label falls in the top-1 prediction.
        top_k_op = tf.nn.in_top_k(logits, labels, 1)

        # Restore the raw learned variables.
        saver = tf.train.Saver()

        # Summary op over everything registered in the graph so far.
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)

        while True:
            eval_once(saver, summary_writer, top_k_op, summary_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
Пример #5
0
def main(_):
    """Build the multi-phase multi-scale patch network and train it."""
    channels = net_config.IMAGE_CHANNEL
    roi_images = tf.placeholder(
        np.float32,
        shape=[
            net_config.BATCH_SIZE, net_config.ROI_SIZE_W,
            net_config.ROI_SIZE_H, channels
        ],
        name='roi_input')
    expand_roi_images = tf.placeholder(
        np.float32,
        shape=[
            net_config.BATCH_SIZE, net_config.EXPAND_SIZE_W,
            net_config.EXPAND_SIZE_H, channels
        ],
        name='expand_roi_input')
    labels_tensor = tf.placeholder(shape=[None], dtype=np.int32)
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    # Four-class, three-phase (NC/ART/PV) small ResNet.
    logits = inference_small(roi_images,
                             expand_roi_images,
                             phase_names=['NC', 'ART', 'PV'],
                             num_classes=4,
                             is_training=is_training_tensor)
    save_model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/ResNetMultiPhaseMultiScale/model/parallel'
    train(logits,
          roi_images,
          expand_roi_images,
          labels_tensor,
          is_training_tensor,
          save_model_path=save_model_path,
          step_width=100)
Пример #6
0
def main(argv=None):  # pylint: disable=unused-argument
    """Download CIFAR-10 if needed, wire up the input switch, and train."""
    maybe_download_and_extract()

    # Distorted/augmented batches for training, plain batches for validation.
    train_batch = distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)
    val_batch = inputs(True, FLAGS.data_dir, FLAGS.batch_size)
    is_training = tf.placeholder('bool', [], name='is_training')
    # Switch between the two pipelines at run time.
    images, labels = tf.cond(is_training,
                             lambda: train_batch,
                             lambda: val_batch)
    logits = inference_small(images,
                             num_classes=10,
                             is_training=is_training,
                             use_bias=(not FLAGS.use_bn),
                             num_blocks=3)
    train(is_training, logits, images, labels)
Пример #7
0
def main(_):
    """Build an evaluation graph for the 4-class patch set and run `val`."""
    images, expand_images, labels = distorted_inputs(
        target_labels=[0, 1, 2, 3], true_labels=[0, 1, 2, 3])
    is_training = tf.placeholder('bool', [], name='is_training')
    logits = inference_small(
        images,
        expand_images,
        phase_names=['NC', 'ART', 'PV'],
        num_classes=4,
        # NOTE(review): is_training is hard-coded to False here even though an
        # is_training placeholder exists above and is passed to val() —
        # presumably intended for a pure-inference graph; confirm.
        is_training=False,
    )
    # NOTE(review): label sets differ — distorted_inputs uses [0..3] while
    # generate_paths uses [0..4]; verify this mismatch is intentional.
    roi_outputs = generate_paths(parent_dir,
                                 'val',
                                 target_labels=[0, 1, 2, 3, 4],
                                 true_labels=[0, 1, 2, 3, 4])
    val(is_training, logits, images, labels, k=1, roi_paths=roi_outputs[0])
Пример #8
0
def main(argv=None):  # pylint: disable=unused-argument
    """Fetch CIFAR-10, build the train/val input switch, and start training."""
    maybe_download_and_extract()

    # Separate pipelines: distorted batches for training, clean ones for val.
    images_train, labels_train = distorted_inputs(FLAGS.data_dir,
                                                  FLAGS.batch_size)
    images_val, labels_val = inputs(True, FLAGS.data_dir, FLAGS.batch_size)

    is_training = tf.placeholder('bool', [], name='is_training')

    def _train_branch():
        return images_train, labels_train

    def _val_branch():
        return images_val, labels_val

    # Choose the pipeline at run time via the placeholder.
    images, labels = tf.cond(is_training, _train_branch, _val_branch)

    logits = inference_small(images,
                             num_classes=10,
                             is_training=is_training,
                             use_bias=(not FLAGS.use_bn),
                             num_blocks=3)
    train(is_training, logits, images, labels)
Пример #9
0
def main(argv=None):  # pylint: disable=unused-argument
    """CIFAR-10 entry point: dual input pipelines plus a runtime switch.

    distorted_inputs shuffles, distorts, and augments the training set,
    while inputs reads the testing set unmodified; an is_training tensor
    selects between the two branches at session-run time.
    """
    maybe_download_and_extract()

    images_train, labels_train = distorted_inputs(FLAGS.data_dir,
                                                  FLAGS.batch_size)
    images_val, labels_val = inputs(True, FLAGS.data_dir, FLAGS.batch_size)

    is_training = tf.placeholder('bool', [], name='is_training')

    train_branch = lambda: (images_train, labels_train)
    val_branch = lambda: (images_val, labels_val)
    images, labels = tf.cond(is_training, train_branch, val_branch)

    logits = inference_small(images,
                             num_classes=10,
                             is_training=is_training,
                             use_bias=(not FLAGS.use_bn),
                             num_blocks=3)
    train(is_training, logits, images, labels)
Пример #10
0
def run_for_resnet_train():
    """Train the single-phase ('ART') small ResNet on the ROIMulti dataset."""
    from Net.BaseNet.ResNet.resnet_train import train
    phase_name = 'ART'
    traindatapath = '/home/give/Documents/dataset/MedicalImage/MedicalImage/ROIMulti/train'
    valdatapath = '/home/give/Documents/dataset/MedicalImage/MedicalImage/ROIMulti/val'
    val_dataset = ValDataSet(new_size=[sub_Config.IMAGE_W, sub_Config.IMAGE_H],
                             phase=phase_name,
                             category_number=sub_Config.OUTPUT_NODE,
                             data_path=valdatapath)
    # NOTE(review): the training generator is also built with ValDataSet —
    # confirm there is no separate TrainDataSet that should be used here.
    train_dataset = ValDataSet(
        new_size=[sub_Config.IMAGE_W, sub_Config.IMAGE_H],
        phase=phase_name,
        category_number=sub_Config.OUTPUT_NODE,
        data_path=traindatapath)
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    is_training = tf.placeholder('bool', [], name='is_training')
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    logits = inference_small(x,
                             is_training=is_training,
                             num_classes=sub_Config.OUTPUT_NODE,
                             # Fix: biases are used only when batch norm is
                             # off — matches the flag's help text and every
                             # other call site (use_bias=(not FLAGS.use_bn)).
                             use_bias=(not FLAGS.use_bn),
                             num_blocks=3)
    train(train_generator=train_dataset,
          val_generator=val_dataset,
          logits=logits,
          images_tensor=x,
          labeles=y_)
Пример #11
0
def train():
    """Multi-GPU training loop for the small ResNet on CIFAR-10.

    Builds one model replica ("tower") per GPU, averages the per-tower
    gradients on the CPU, and runs FLAGS.epochs epochs of momentum SGD,
    printing per-step loss/throughput and a windowed average loss.
    """
    global parameters
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=FLAGS.log_device_placement)

    with tf.Graph().as_default(), tf.device("/cpu:0"):
        # Non-trainable step counter shared by all towers.
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

        # Either an explicit comma-separated list of GPU ids, or 0..num_gpus-1.
        device_ids = FLAGS.device_ids
        if not device_ids:
            device_ids = [str(i) for i in range(FLAGS.num_gpus)]
        else:
            device_ids = device_ids.split(',')

        if len(device_ids) > FLAGS.num_gpus:
            print(
                'The device_ids should have the same number of GPUs with num_gpus'
            )
            return

        lr = 0.1
        #optimizer = tf.train.GradientDescentOptimizer(lr)
        optimizer = tf.train.MomentumOptimizer(lr, 0.9)

        tower_grads = []
        average_loss_tensor = []
        for i in xrange(FLAGS.num_gpus):
            print('what is i: ', i)
            with tf.device('/gpu:%s' % device_ids[i]):
                with tf.name_scope('%s_%s' %
                                   ('TOWER', device_ids[i])) as n_scope:
                    # Each tower reads its own (non-distorted) input batch.
                    images, labels = cifar10_input.inputs(
                        False, FLAGS.data_dir, FLAGS.batch_size)
                    #logits = inference(images, is_training=True)
                    logits = inference_small(images,
                                             is_training=True,
                                             num_blocks=9)
                    loss_tensor = loss(logits, labels)

                    tf.add_to_collection('losses', loss_tensor)
                    tf.add_n(tf.get_collection('losses'), name='total_loss')

                    # Sum only the losses created inside this tower's scope.
                    losses = tf.get_collection('losses', n_scope)
                    total_loss = tf.add_n(losses, name='total_loss')
                    average_loss_tensor.append(total_loss)

                    # Reuse variables so all towers share one set of weights.
                    tf.get_variable_scope().reuse_variables()
                    grads = optimizer.compute_gradients(total_loss)

                    tower_grads.append(grads)
        # Average the per-tower gradients and apply them in one step.
        grads = average_gradients(tower_grads)
        apply_gradient_op = optimizer.apply_gradients(grads,
                                                      global_step=global_step)
        train_op = apply_gradient_op
        average_op = tf.reduce_mean(average_loss_tensor, 0)

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())

        # Build an initialization operation.
        init = tf.initialize_all_variables()
        sess = tf.Session(config=config)
        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # The effective batch size spans all GPUs; ceiling-divide the epoch.
        real_batch_size = FLAGS.batch_size * FLAGS.num_gpus
        num_batches_per_epoch = int(
            (EPOCH_SIZE + real_batch_size - 1) / real_batch_size)
        iterations = FLAGS.epochs * num_batches_per_epoch
        average_batch_time = 0.0
        epochs_info = []

        step = 0
        average_loss = 0.0
        for step in xrange(iterations):
            start_time = time.time()
            #_, loss_v = sess.run([train_op, total_loss])
            _, loss_v = sess.run([train_op, average_op])
            duration = time.time() - start_time
            average_loss += loss_v
            average_batch_time += float(duration)
            assert not np.isnan(loss_v), 'Model diverged with loss = NaN'
            if step % FLAGS.log_step == 0:
                examples_per_sec = FLAGS.batch_size / duration
                sec_per_batch = float(duration)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
                )
                print(format_str % (datetime.now(), step, loss_v,
                                    examples_per_sec, sec_per_batch))
            if step > 0 and step % (FLAGS.eval_step *
                                    num_batches_per_epoch) == 0:
                # Report the loss averaged over the last eval window, then
                # reset the accumulator.
                average_loss /= num_batches_per_epoch * FLAGS.eval_step
                print('epoch: %d, loss: %.2f' %
                      (step / num_batches_per_epoch, average_loss))
                epochs_info.append(
                    '%d:_:%s' %
                    (step /
                     (FLAGS.eval_step * num_batches_per_epoch), average_loss))
                average_loss = 0.0
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)

        coord.request_stop()
        coord.join(threads)
        average_batch_time /= iterations
        print 'average_batch_time: ', average_batch_time
        print('epoch_info: %s' % ','.join(epochs_info))
Пример #12
0
# Inference script setup: placeholders for the ROI patch and its expanded
# context patch, batch size left dynamic (None).
roi_images = tf.placeholder(shape=[
    None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
    net_config.IMAGE_CHANNEL
],
                            dtype=np.float32,
                            name='roi_input')
expand_roi_images = tf.placeholder(shape=[
    None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
    net_config.IMAGE_CHANNEL
],
                                   dtype=np.float32,
                                   name='expand_roi_input')
batch_size_tensor = tf.placeholder(tf.int32, [])
# NOTE(review): is_training_tensor is used below but not defined in this
# snippet — presumably defined earlier in the full script; confirm.
logits = inference_small(roi_images,
                         expand_roi_images,
                         phase_names=['NC', 'ART', 'PV'],
                         num_classes=4,
                         is_training=is_training_tensor,
                         batch_size=batch_size_tensor)
# Class probabilities and argmax predictions for the 4 classes.
predictions = tf.nn.softmax(logits)
saver = tf.train.Saver(tf.all_variables())
print predictions

predicted_label_tensor = tf.argmax(predictions, axis=1)
print predicted_label_tensor
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
sess.run(init)
tf.train.start_queue_runners(sess=sess)
# Look up the newest checkpoint in model_path to restore from.
latest = tf.train.latest_checkpoint(model_path)
if not latest:
    print "No checkpoint to continue from in", model_path
Пример #13
0
def train():
  """Single-device CIFAR-10 training loop (CPU or one GPU).

  Selects the device and data format from FLAGS, builds the small ResNet,
  runs FLAGS.epochs epochs of momentum SGD, and saves one checkpoint at the
  final step.
  """
  global parameters
  data_format = FLAGS.data_format
  config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_device_placement)
  #config.gpu_options.force_gpu_compatible = 1
  device_id = FLAGS.device_id
  if int(device_id) >= 0:
      device_str = '/gpu:%d'%int(device_id)
      config.allow_soft_placement = True
      config.intra_op_parallelism_threads = 1
      config.inter_op_parallelism_threads = 0
  else:
      device_str = '/cpu:0'
      # Respect OMP_NUM_THREADS for intra-op parallelism on CPU.
      num_threads = os.getenv('OMP_NUM_THREADS', 1)
      config = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
      # Default format for CPU.  When using MKL NCHW might be better but that has not been proven.
      data_format = 'NHWC'
  print('Using data format:{}'.format(data_format))
  with tf.Graph().as_default(), tf.device(device_str), tf.Session(config=config) as sess:
      initalizer = None
      images = None
      labels = None
      with tf.device('/cpu:0'):
        # The input pipeline always lives on the CPU; either the
        # tf.data-based pipeline or the older queue-based one.
        if FLAGS.use_dataset:
          iterator, initalizer =  cifar10_input.dataSet(FLAGS.data_dir, FLAGS.batch_size,
                                                        data_format=data_format,
                                                        device=device_str)
          images, labels = iterator.get_next()
        else:
          images, labels = cifar10_input.inputs(False, FLAGS.data_dir, FLAGS.batch_size, data_format=data_format)
        labels = tf.contrib.layers.one_hot_encoding(labels, 10)
      logits = inference_small(images, is_training=True, num_blocks=9, data_format=data_format)
      # Add a simple objective so we can calculate the backward pass.
      loss_value = loss(logits, labels)
      # Compute the gradient with respect to all the parameters.
      lr = 0.01
      # Run the collected update ops (e.g. batch-norm statistics) together
      # with every optimizer step.
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
      with tf.control_dependencies(update_ops):
        grad = tf.train.MomentumOptimizer(lr, 0.9).minimize(loss_value)

      # Create a saver.
      saver = tf.train.Saver(tf.global_variables())

      # Build an initialization operation.
      init = tf.global_variables_initializer()
      # Start running operations on the Graph.
      sess.run(init)
      coord = None
      threads = None
      if FLAGS.use_dataset:
        sess.run(initalizer)
      else:
        # Queue pipeline needs runner threads and a coordinator.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

      real_batch_size = FLAGS.batch_size 
      # Ceiling division so every example is covered each epoch.
      num_batches_per_epoch = int((EPOCH_SIZE + real_batch_size - 1)/ real_batch_size)
      iterations = FLAGS.epochs * num_batches_per_epoch 
      average_batch_time = 0.0

      epochs_info = []
      average_loss = 0.0
      for step in xrange(iterations):
          start_time = time.time()
          _, loss_v = sess.run([grad, loss_value])
          duration = time.time() - start_time
          average_batch_time += float(duration)
          average_loss += loss_v
          assert not np.isnan(loss_v), 'Model diverged with loss = NaN'
          if step % FLAGS.log_step == 0:
              examples_per_sec = FLAGS.batch_size / duration
              sec_per_batch = float(duration)
              format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
              print (format_str % (datetime.now(), step, loss_v, examples_per_sec, sec_per_batch))
          if step > 0 and step % (FLAGS.eval_step * num_batches_per_epoch) == 0:
              # Record the loss averaged over the eval window, then reset.
              average_loss /= num_batches_per_epoch * FLAGS.eval_step
              epochs_info.append('%d:_:%s'%(step/(FLAGS.eval_step*num_batches_per_epoch), average_loss)) 
              average_loss = 0.0
          if step == iterations-1:
              checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
              saver.save(sess, checkpoint_path, global_step=step)

      if not FLAGS.use_dataset:
        coord.request_stop()
        coord.join(threads)
      average_batch_time /= iterations
      print 'average_batch_time: ', average_batch_time
      print ('epoch_info: %s'% ','.join(epochs_info))
Пример #14
0
def train(train_data_set, val_data_set, load_model_path, save_model_path):
    """Fine-tune the small ResNet, validating and checkpointing periodically.

    Args:
        train_data_set: generator exposing get_next_batch(batch_size,
            distribution) that yields (images, labels) training batches.
        val_data_set: same interface, used for validation every 100 steps.
        load_model_path: checkpoint to restore before training; skipped when
            falsy.
        save_model_path: directory for checkpoints every 500 steps; skipped
            when None.
    """
    x = tf.placeholder(tf.float32,
                       shape=[
                           None, sub_Config.IMAGE_W, sub_Config.IMAGE_H,
                           sub_Config.IMAGE_CHANNEL
                       ],
                       name='input_x')
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    # regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    is_training = tf.placeholder('bool', [], name='is_training')
    # NOTE(review): flags are defined inside the function; a second call to
    # train() in the same process would raise a duplicate-flag error.
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    # NOTE(review): other call sites pass use_bias=(not FLAGS.use_bn);
    # confirm whether this non-inverted form is intentional.
    y = inference_small(x,
                        is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE).minimize(
            loss=loss_, global_step=global_step)
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        # Fraction of predictions matching the integer labels.
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        if load_model_path:
            saver.restore(sess, load_model_path)
        writer = tf.summary.FileWriter('./log/fine_tuning/train',
                                       tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val',
                                           tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            images, labels = train_data_set.get_next_batch(
                sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
            # NOTE(review): IMAGE_W is used for both spatial dims here while
            # the placeholder above uses IMAGE_W x IMAGE_H — confirm square
            # inputs are intended.
            images = changed_shape(images, [
                len(images), sub_Config.IMAGE_W, sub_Config.IMAGE_W,
                sub_Config.IMAGE_CHANNEL
            ])
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss_, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                })
            writer.add_summary(summary=summary, global_step=global_step_value)
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # Save the model
                import os
                saveedpath = os.path.join(save_model_path,
                                          str(global_step_value))
                if not os.path.exists(saveedpath):
                    os.mkdir(saveedpath)
                saveedpath += '/model.ckpt'
                saver.save(sess, saveedpath, global_step=global_step_value)
            if i % 100 == 0:
                # Periodic validation pass on one batch.
                validation_images, validation_labels = val_data_set.get_next_batch(
                    sub_Config.BATCH_SIZE, sub_Config.BATCH_DISTRIBUTION)
                validation_images = changed_shape(validation_images, [
                    len(validation_images), sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W, 1
                ])
                validation_accuracy, validation_loss, summary, logits = sess.run(
                    [accuracy_tensor, loss_, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    })
                calculate_acc_error(logits=np.argmax(logits, 1),
                                    label=validation_labels,
                                    show=True)
                binary_acc = acc_binary_acc(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                )
                val_writer.add_summary(summary, global_step_value)
                print 'step is %d,training loss value is %g,  accuracy is %g ' \
                      'validation loss value is %g, accuracy is %g, binary_acc is %g' % \
                      (global_step_value, loss_value, accuracy_value, validation_loss, validation_accuracy, binary_acc)
        writer.close()
        val_writer.close()
Пример #15
0
    None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
    net_config.IMAGE_CHANNEL
],
                            dtype=np.float32,
                            name='roi_input')
# Expanded-context patch placeholder; batch size left dynamic (None).
expand_roi_images = tf.placeholder(shape=[
    None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
    net_config.IMAGE_CHANNEL
],
                                   dtype=np.float32,
                                   name='expand_roi_input')
batch_size_tensor = tf.placeholder(tf.int32, [])
# Co-occurrence variant of the multi-phase network.  NOTE(review):
# roi_images and is_training_tensor come from earlier in the full script.
logits = inference_small(roi_images,
                         expand_roi_images,
                         co_occurrence=True,
                         phase_names=['NC', 'ART', 'PV'],
                         num_classes=4,
                         point_phase=[2],
                         is_training=is_training_tensor,
                         batch_size=batch_size_tensor)
# Softmax over output index 2 — presumably the pointed-phase (PV) logits,
# matching point_phase=[2] above; confirm against inference_small.
predictions = tf.nn.softmax(logits[2])
saver = tf.train.Saver(tf.all_variables())
print predictions

predicted_label_tensor = tf.argmax(predictions, axis=1)
print predicted_label_tensor
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
sess.run(init)
tf.train.start_queue_runners(sess=sess)
# Look up the newest checkpoint in model_path to restore from.
latest = tf.train.latest_checkpoint(model_path)
if not latest:
Пример #16
0
def train():
    """Single-device CIFAR-10 training loop for the small ResNet.

    Builds the queue-based input pipeline and model on the device chosen by
    FLAGS.device_id, trains with momentum SGD for FLAGS.epochs epochs, and
    checkpoints once at the final iteration.
    """
    global parameters
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=FLAGS.log_device_placement)
    # NOTE(review): device_str is read here but not defined in this function —
    # presumably a module-level global; confirm.
    if device_str.find('cpu') >= 0:  # cpu version
        # Respect OMP_NUM_THREADS for intra-op parallelism on CPU.
        num_threads = os.getenv('OMP_NUM_THREADS', 1)
        config = tf.ConfigProto(allow_soft_placement=True,
                                intra_op_parallelism_threads=int(num_threads))
    with tf.Graph().as_default(), tf.device(get_device_str(
            FLAGS.device_id)), tf.Session(config=config) as sess:
        images, labels = cifar10_input.inputs(False, FLAGS.data_dir,
                                              FLAGS.batch_size)
        print('Images: ', images)

        #logits = inference(images, is_training=True, num_blocks=9)
        logits = inference_small(images, is_training=True, num_blocks=9)
        # Add a simple objective so we can calculate the backward pass.
        loss_value = loss(logits, labels)
        # Compute the gradient with respect to all the parameters.
        lr = 0.01
        #grad = tf.train.GradientDescentOptimizer(lr).minimize(loss_value)
        grad = tf.train.MomentumOptimizer(lr, 0.9).minimize(loss_value)

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())

        # Build an initialization operation.
        init = tf.initialize_all_variables()
        # Start running operations on the Graph.
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        real_batch_size = FLAGS.batch_size
        # Ceiling division so every example is covered each epoch.
        num_batches_per_epoch = int(
            (EPOCH_SIZE + real_batch_size - 1) / real_batch_size)
        iterations = FLAGS.epochs * num_batches_per_epoch
        average_batch_time = 0.0

        epochs_info = []
        average_loss = 0.0
        for step in xrange(iterations):
            start_time = time.time()
            _, loss_v = sess.run([grad, loss_value])
            duration = time.time() - start_time
            average_batch_time += float(duration)
            average_loss += loss_v
            assert not np.isnan(loss_v), 'Model diverged with loss = NaN'
            if step % FLAGS.log_step == 0:
                examples_per_sec = FLAGS.batch_size / duration
                sec_per_batch = float(duration)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
                )
                print(format_str % (datetime.now(), step, loss_v,
                                    examples_per_sec, sec_per_batch))
            if step > 0 and step % (FLAGS.eval_step *
                                    num_batches_per_epoch) == 0:
                # Record the loss averaged over the eval window, then reset.
                average_loss /= num_batches_per_epoch * FLAGS.eval_step
                epochs_info.append(
                    '%d:_:%s' %
                    (step /
                     (FLAGS.eval_step * num_batches_per_epoch), average_loss))
                average_loss = 0.0
            if step == iterations - 1:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        coord.request_stop()
        coord.join(threads)
        average_batch_time /= iterations
        print 'average_batch_time: ', average_batch_time
        print('epoch_info: %s' % ','.join(epochs_info))
Пример #17
0
def train():
    """Train the small ResNet on CIFAR-10 across multiple GPU towers.

    Builds one model tower per GPU (variables shared via a greedy
    parameter-server placement), averages the per-tower gradients, and
    runs synchronous momentum SGD for ``FLAGS.epochs`` epochs.  Loss is
    logged every ``FLAGS.log_step`` steps, an average loss is recorded
    every ``FLAGS.eval_step`` epochs, and a final checkpoint is written
    to ``FLAGS.train_dir``.
    """
    global parameters
    config = tf.ConfigProto(allow_soft_placement=True,
                            log_device_placement=FLAGS.log_device_placement)
    config.allow_soft_placement = True
    # One intra-op thread; 0 lets TF choose the inter-op pool size.
    config.intra_op_parallelism_threads = 1
    config.inter_op_parallelism_threads = 0

    with tf.Graph().as_default(), tf.device("/" + FLAGS.local_ps_device):
        global_step = tf.get_variable('global_step', [],
                                      initializer=tf.constant_initializer(0),
                                      trainable=False)

        # Explicit GPU ids from the flag, or 0..num_gpus-1 by default.
        device_ids = FLAGS.device_ids
        if not device_ids:
            device_ids = [str(i) for i in range(FLAGS.num_gpus)]
        else:
            device_ids = device_ids.split(',')

        if len(device_ids) > FLAGS.num_gpus:
            print(
                'The device_ids should have the same number of GPUs with num_gpus'
            )
            return

        lr = 0.01
        #optimizer = tf.train.GradientDescentOptimizer(lr)
        optimizer = tf.train.MomentumOptimizer(lr, 0.9)

        def assign_to_device(device, ps_device=FLAGS.local_ps_device):
            """Return a placement fn: ops on `device`, variables spread
            greedily (by element count) over the parameter-server devices."""
            worker_device = device
            ps_sizes = [0]
            # BUG FIX: the original compared the bound method `.lower`
            # (not its result) to 'gpu', which is always False, so a GPU
            # parameter server was never balanced across all GPUs.
            if FLAGS.local_ps_device.lower() == 'gpu':
                ps_sizes = [0] * FLAGS.num_gpus

            def _assign(op):
                if op.device:
                    return op.device
                if op.type not in ['Variable', 'VariableV2']:
                    return worker_device
                # Pick the least-loaded PS device for this variable.
                device_index, _ = min(enumerate(ps_sizes),
                                      key=operator.itemgetter(1))
                device_name = '/' + FLAGS.local_ps_device + ':' + str(
                    device_index)
                var_size = op.outputs[0].get_shape().num_elements()
                ps_sizes[device_index] += var_size
                return device_name

            return _assign

        images = None
        labels = None
        initalizer = None
        if FLAGS.use_dataset:
            # tf.data input pipeline, staged on the CPU.
            with tf.device('/CPU:0'):
                iterator, initalizer = cifar10_input.dataSet(
                    FLAGS.data_dir,
                    FLAGS.batch_size,
                    device='gpu',
                    data_format=FLAGS.data_format)
                images, labels = iterator.get_next()

        tower_grads = []
        reuse_variables = None
        losses = []
        for i in six.moves.range(FLAGS.num_gpus):
            with tf.device(assign_to_device('/gpu:%s' % device_ids[i])):
                with tf.name_scope('%s_%s' %
                                   ('TOWER', device_ids[i])) as n_scope:
                    with tf.device('/cpu:0'):
                        if not FLAGS.use_dataset:
                            # Queue-runner input pipeline (legacy path).
                            images, labels = cifar10_input.inputs(
                                False,
                                FLAGS.data_dir,
                                FLAGS.batch_size,
                                data_format=FLAGS.data_format)
                    # First tower creates the variables; later towers reuse.
                    with tf.variable_scope(tf.get_variable_scope(),
                                           reuse=reuse_variables):
                        logits = inference_small(images,
                                                 is_training=True,
                                                 num_blocks=9,
                                                 data_format=FLAGS.data_format)
                    hot_labels = tf.contrib.layers.one_hot_encoding(labels, 10)
                    tower_loss = loss(logits, hot_labels)
                    losses.append(tower_loss)
                    grads = optimizer.compute_gradients(tower_loss)
                    tower_grads.append(grads)
                    reuse_variables = True

        # Batch-norm updates from the first tower only.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, 'TOWER_0')
        with tf.control_dependencies(update_ops):
            # Average losses across towers (GPUs).
            total_loss = tf.reduce_mean(losses, 0)
            grads = average_gradients(tower_grads)
            apply_gradient_op = optimizer.apply_gradients(
                grads, global_step=global_step)
        train_op = apply_gradient_op

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        # Build an initialization operation.
        init = tf.global_variables_initializer()
        sess = tf.Session(config=config)
        sess.run(init)

        coord = None
        threads = None
        if FLAGS.use_dataset:
            sess.run(initalizer)
        else:
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # Effective batch size is per-GPU batch times the number of towers.
        real_batch_size = FLAGS.batch_size * FLAGS.num_gpus
        num_batches_per_epoch = int(
            (EPOCH_SIZE + real_batch_size - 1) / real_batch_size)
        iterations = FLAGS.epochs * num_batches_per_epoch
        average_batch_time = 0.0
        epochs_info = []

        step = 0
        average_loss = 0.0
        for step in six.moves.xrange(iterations):
            start_time = time.time()
            _, loss_v = sess.run([train_op, total_loss])
            duration = time.time() - start_time
            average_loss += loss_v
            average_batch_time += float(duration)
            assert not np.isnan(loss_v), 'Model diverged with loss = NaN'
            if step % FLAGS.log_step == 0:
                # NOTE(review): uses the per-GPU batch size, not
                # real_batch_size, so examples/sec undercounts with >1 GPU
                # — confirm intent.
                examples_per_sec = FLAGS.batch_size / duration
                sec_per_batch = float(duration)
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
                )
                print(format_str % (datetime.now(), step, loss_v,
                                    examples_per_sec, sec_per_batch))
            if step > 0 and step % (FLAGS.eval_step *
                                    num_batches_per_epoch) == 0:
                # Report loss averaged over the last eval window.
                average_loss /= num_batches_per_epoch * FLAGS.eval_step
                print('epoch: %d, loss: %.2f' %
                      (step / num_batches_per_epoch, average_loss))
                epochs_info.append(
                    '%d:_:%s' %
                    (step /
                     (FLAGS.eval_step * num_batches_per_epoch), average_loss))
                average_loss = 0.0
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)

        if not FLAGS.use_dataset:
            coord.request_stop()
            coord.join(threads)
        average_batch_time /= iterations
        print('average_batch_time: ', average_batch_time)
        print('epoch_info: %s' % ','.join(epochs_info))
Пример #18
0
def train(train_data_set, val_data_set, load_model_path, save_model_path):
    """Fine-tune the small ResNet on the full training set, validating periodically.

    Builds the graph (placeholders, inference, loss, plain SGD train op,
    accuracy), optionally restores weights from `load_model_path`, then runs
    sub_Config.ITERATOE_NUMBER full-batch steps while writing TensorBoard
    summaries for training and validation.

    Args:
        train_data_set: object exposing `.images` and `.labels` arrays.
        val_data_set: object exposing `.images` and `.labels` arrays.
        load_model_path: checkpoint to restore, or falsy to init from scratch.
        save_model_path: checkpoint path saved every 500 steps, or None.
    """
    # Input batch: NHWC, variable batch size.
    x = tf.placeholder(
        tf.float32,
        shape=[
            None,
            sub_Config.IMAGE_W,
            sub_Config.IMAGE_H,
            sub_Config.IMAGE_CHANNEL
        ],
        name='input_x'
    )
    # Labels arrive as float32 and are cast to int where needed below.
    y_ = tf.placeholder(
        tf.float32,
        shape=[
            None,
        ]
    )
    tf.summary.histogram(
        'label',
        y_
    )
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    is_training = tf.placeholder('bool', [], name='is_training')
    # NOTE(review): defining flags inside the function raises a duplicate-flag
    # error if train() is called twice in one process — confirm single use.
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean('use_bn', True, 'use batch normalization. otherwise use biases')
    # NOTE(review): the 'use_bn' flag is passed as use_bias=; the help text
    # suggests the opposite meaning — confirm against inference_small.
    y = inference_small(x, is_training=is_training,
                        num_classes=sub_Config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        num_blocks=3)
    tf.summary.histogram(
        'logits',
        tf.argmax(y, 1)
    )
    loss_ = loss(
        logits=y,
        labels=tf.cast(y_, np.int32)

    )
    tf.summary.scalar(
        'loss',
        loss_
    )
    train_op = tf.train.GradientDescentOptimizer(
        learning_rate=sub_Config.LEARNING_RATE
    ).minimize(
        loss=loss_,
        global_step=global_step
    )
    # with tf.control_dependencies([train_step, vaeriable_average_op]):
    #     train_op = tf.no_op(name='train')

    with tf.variable_scope('accuracy'):
        # Fraction of predictions matching the (int-cast) labels.
        accuracy_tensor = tf.reduce_mean(
            tf.cast(
                tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                tf.float32
            )
        )
        tf.summary.scalar(
            'accuracy',
            accuracy_tensor
        )
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        # sess.run(tf.global_variables_initializer())

        if load_model_path:
            # load(load_model_path, sess)
            # with tf.variable_scope('conv1_1', reuse=True):
            #     weights1 = tf.get_variable('weights')
            #     print weights1.eval(sess)
            saver.restore(sess, load_model_path)
        else:
            sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('./log/fine_tuning/train', tf.get_default_graph())
        val_writer = tf.summary.FileWriter('./log/fine_tuning/val', tf.get_default_graph())
        for i in range(sub_Config.ITERATOE_NUMBER):
            # Full-batch training: the entire training set every step.
            images, labels = train_data_set.images, train_data_set.labels
            images = changed_shape(images, [
                    len(images),
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_W,
                    sub_Config.IMAGE_CHANNEL
                ])
            if i == 0:
                # Visual sanity check of the first input image.
                from PIL import Image
                image = Image.fromarray(np.asarray(images[0, :, :, 0], np.uint8))
                image.show()
            # labels[labels == 1] = 0
            # labels[labels == 3] = 0
            # labels[labels == 4] = 1
            # labels[labels == 2] = 1
            _, loss_value, accuracy_value, summary, global_step_value = sess.run(
                [train_op, loss_, accuracy_tensor, merge_op, global_step],
                feed_dict={
                    x: images,
                    y_: labels
                }
            )
            writer.add_summary(
                summary=summary,
                global_step=global_step_value
            )
            if i % 500 == 0 and i != 0 and save_model_path is not None:
                # Save model checkpoint.
                saver.save(sess, save_model_path)
            if i % 100 == 0:
                # Periodic validation on the full validation set.
                validation_images, validation_labels = val_data_set.images, val_data_set.labels
                validation_images = changed_shape(
                    validation_images,
                    [
                        len(validation_images),
                        sub_Config.IMAGE_W,
                        sub_Config.IMAGE_W,
                        1
                    ]
                )
                # validation_labels[validation_labels == 1] = 0
                # validation_labels[validation_labels == 3] = 0
                # validation_labels[validation_labels == 4] = 1
                # validation_labels[validation_labels == 2] = 1
                validation_accuracy, summary, logits = sess.run(
                    [accuracy_tensor, merge_op, y],
                    feed_dict={
                        x: validation_images,
                        y_: validation_labels
                    }
                )
                calculate_acc_error(
                    logits=np.argmax(logits, 1),
                    label=validation_labels,
                    show=True
                )
                val_writer.add_summary(summary, global_step_value)
                print 'step is %d,training loss value is %g,  accuracy is %g ' \
                      'validation loss value is, accuracy is %g' % \
                      (global_step_value, loss_value, accuracy_value, validation_accuracy)
        writer.close()
        val_writer.close()
Пример #19
0
# Script: rebuild the 3-class patch classifier graph and restore the latest
# checkpoint so predictions can be run interactively.
model_path = '/home/give/PycharmProjects/MedicalImage/Net/ICIP/3-class/Patched/models'
divided_liver = False
# Non-trainable step counter (must exist so checkpoints restore cleanly).
global_step = tf.get_variable('global_step', [],
                              initializer=tf.constant_initializer(0),
                              trainable=False)
is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
# ROI patches: NHWC, variable batch size.
roi_images = tf.placeholder(shape=[
    None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
    net_config.IMAGE_CHANNEL
],
                            dtype=np.float32,
                            name='roi_input')
batch_size_tensor = tf.placeholder(tf.int32, [])
# Three-phase small ResNet; point_phase=[1] presumably selects the second
# (ART) phase branch — verify against inference_small's signature.
logits = inference_small(roi_images,
                         phase_names=['NC', 'ART', 'PV'],
                         num_classes=3,
                         point_phase=[1],
                         is_training=is_training_tensor,
                         batch_size=batch_size_tensor)
predictions = tf.nn.softmax(logits)
saver = tf.train.Saver(tf.all_variables())
print predictions

predicted_label_tensor = tf.argmax(predictions, axis=1)
print predicted_label_tensor
init = tf.initialize_all_variables()
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
sess.run(init)
tf.train.start_queue_runners(sess=sess)
# Restore the most recent checkpoint, if one exists.
latest = tf.train.latest_checkpoint(model_path)
if not latest:
    print "No checkpoint to continue from in", model_path
Пример #20
0
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
        tarfile.open(filepath, 'r:gz').extractall(dest_directory)


# def main(argv=None):  # pylint: disable=unused-argument
# Script: download CIFAR data, build the three input pipelines, and train.
maybe_download_and_extract()

# Two tensors representing each batch.
images_train, labels_train = distorted_inputs(FLAGS.data_dir, FLAGS.batch_size)

images_val, labels_val = inputs(True, FLAGS.data_dir, FLAGS.batch_size)

images_test, labels_test = get_test_data(True, FLAGS.data_dir,
                                         FLAGS.batch_size)

is_training = tf.placeholder('bool', [], name='is_training')

# NOTE(review): the False branch feeds the TEST pipeline, not the validation
# one built above (images_val is unused here) — confirm this is intended.
images, labels = tf.cond(is_training, lambda: (images_train, labels_train),
                         lambda: (images_test, labels_test))

logits = inference_small(images,
                         num_classes=10,
                         is_training=is_training,
                         use_bias=(not FLAGS.use_bn),
                         num_blocks=3)
train(is_training, logits, images, labels)

# if __name__ == '__main__':
#     tf.app.run()
Пример #21
0
def main(_):
    roi_images = tf.placeholder(shape=[
        None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                dtype=np.float32,
                                name='roi_input')
    expand_roi_images = tf.placeholder(shape=[
        None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                       dtype=np.float32,
                                       name='expand_roi_input')
    batch_size_tensor = tf.placeholder(dtype=tf.int32, shape=[])
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    logits, _, _, representor_tensor = inference_small(
        roi_images,
        expand_roi_images,
        phase_names=['NC', 'ART', 'PV'],
        num_classes=4,
        is_training=is_training_tensor,
        batch_size=batch_size_tensor)
    model_path = '/home/give/PycharmProjects/MICCAI2018/deeplearning/LSTM/parameters/0/0.0001'
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/cross_validation/model/multiscale/parallel/0/2200.0'
    predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver(tf.all_variables())
    print predictions

    predicted_label_tensor = tf.argmax(predictions, axis=1)
    print predicted_label_tensor
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    latest = tf.train.latest_checkpoint(model_path)
    if not latest:
        print "No checkpoint to continue from in", model_path
        sys.exit(1)
    print "resume", latest
    saver.restore(sess, latest)

    data_dir = '/home/give/Documents/dataset/MICCAI2018/Patches/crossvalidation/0/test'
    slice_dir = '/home/give/Documents/dataset/MICCAI2018/Slices/crossvalidation/0/test'
    labels = []
    paths = []
    for typeid in [0, 1, 2, 3]:
        cur_path = os.path.join(data_dir, str(typeid))
        names = os.listdir(cur_path)
        labels.extend([typeid] * len(names))
        paths.extend([os.path.join(cur_path, name) for name in names])
    paths, labels = shuffle_image_label(paths, labels)
    start_index = 0
    predicted_labels = []
    liver_density = load_raw_liver_density()
    while True:
        if start_index >= len(paths):
            break
        print start_index, len(paths)
        end_index = start_index + net_config.BATCH_SIZE
        cur_paths = paths[start_index:end_index]
        cur_roi_images = [np.asarray(load_patch(path)) for path in cur_paths]
        cur_expand_roi_images = [
            np.asarray(load_patch(path, return_roi=True, parent_dir=slice_dir))
            for path in cur_paths
        ]
        cur_roi_images = resize_images(cur_roi_images, net_config.ROI_SIZE_W,
                                       True)
        cur_expand_roi_images = resize_images(cur_expand_roi_images,
                                              net_config.EXPAND_SIZE_W, True)
        # cur_liver_densitys = [liver_density[os.path.basename(path)[:os.path.basename(path).rfind('_')]] for
        #                       path in cur_paths]
        # for i in range(len(cur_roi_images)):
        #     for j in range(3):
        #         cur_roi_images[i, :, :, j] = (1.0 * cur_roi_images[i, :, :, j]) / (1.0 * cur_liver_densitys[i][j])
        #         cur_expand_roi_images[i, :, :, j] = (1.0 * cur_expand_roi_images[i, :, :, j]) / (
        #         1.0 * cur_liver_densitys[i][j])
        predicted_batch_labels, representor_value, logits_value = sess.run(
            [predicted_label_tensor, representor_tensor, logits],
            feed_dict={
                roi_images: cur_roi_images,
                expand_roi_images: cur_expand_roi_images,
                is_training_tensor: False,
                batch_size_tensor: len(cur_roi_images)
            })
        features.extend(representor_value)
        batch_labels = labels[start_index:end_index]
        predicted_labels.extend(predicted_batch_labels)
        start_index = end_index
        calculate_acc_error(predicted_batch_labels, batch_labels)
    calculate_acc_error(predicted_labels, labels)

    # get the feature, visualize it
    # first dimension reduction
    from sklearn.decomposition import PCA
    dim = 2
    from plot import plot_scatter, plot_scatter3D
    pca_obj = PCA(n_components=dim)
    visualized_data = pca_obj.fit_transform(features)
    if dim == 3:
        plot_scatter3D(visualized_data[:, 0],
                       visualized_data[:, 1],
                       visualized_data[:, 2],
                       labels=labels,
                       category_num=4)
    else:
        plot_scatter(visualized_data[:, 0],
                     visualized_data[:, 1],
                     labels=labels,
                     category_num=4)

    dim = 3
    pca_obj = PCA(n_components=dim)
    visualized_data = pca_obj.fit_transform(features)
    if dim == 3:
        plot_scatter3D(visualized_data[:, 0],
                       visualized_data[:, 1],
                       visualized_data[:, 2],
                       labels=labels,
                       category_num=4)
    else:
        plot_scatter(visualized_data[:, 0],
                     visualized_data[:, 1],
                     labels=labels,
                     category_num=4)
Пример #22
0
def main(_):
    """Evaluate a trained 5-class multi-phase patch model on the test split.

    Restores the latest checkpoint from `model_path`, runs batched inference
    over the patches under `data_dir`, and reports per-batch and overall
    accuracy via calculate_acc_error.
    """
    # ROI patches: NHWC, variable batch size.
    roi_images = tf.placeholder(shape=[
        None, net_config.ROI_SIZE_W, net_config.ROI_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                dtype=np.float32,
                                name='roi_input')
    # Larger-context patches around the same ROI.
    expand_roi_images = tf.placeholder(shape=[
        None, net_config.EXPAND_SIZE_W, net_config.EXPAND_SIZE_H,
        net_config.IMAGE_CHANNEL
    ],
                                       dtype=np.float32,
                                       name='expand_roi_input')
    batch_size_tensor = tf.placeholder(dtype=tf.int32, shape=[])
    is_training_tensor = tf.placeholder(dtype=tf.bool, shape=[])
    # point_phase=[2] presumably selects the third (PV) phase branch —
    # verify against inference_small's signature.
    logits = inference_small(roi_images,
                             expand_roi_images,
                             phase_names=['NC', 'ART', 'PV'],
                             num_classes=5,
                             point_phase=[2],
                             is_training=is_training_tensor,
                             batch_size=batch_size_tensor)
    model_path = '/home/give/PycharmProjects/MedicalImage/Net/ICIP/Patch_ROI/models/500.0'
    # model_path = '/home/give/PycharmProjects/MedicalImage/Net/forpatch/cross_validation/model/multiscale/parallel/0/2200.0'
    predictions = tf.nn.softmax(logits)
    saver = tf.train.Saver(tf.all_variables())
    print predictions

    predicted_label_tensor = tf.argmax(predictions, axis=1)
    print predicted_label_tensor
    init = tf.initialize_all_variables()
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    # Abort if there is no checkpoint to restore.
    latest = tf.train.latest_checkpoint(model_path)
    if not latest:
        print "No checkpoint to continue from in", model_path
        sys.exit(1)
    print "resume", latest
    saver.restore(sess, latest)

    data_dir = '/home/give/Documents/dataset/MedicalImage/MedicalImage/Patches/ICIP/only-patch/test'
    # Collect (path, label) pairs from the per-class subdirectories.
    labels = []
    paths = []
    for typeid in [0, 1, 2, 3, 4]:
        cur_path = os.path.join(data_dir, str(typeid))
        names = os.listdir(cur_path)
        labels.extend([typeid] * len(names))
        paths.extend([os.path.join(cur_path, name) for name in names])
    paths, labels = shuffle_image_label(paths, labels)
    start_index = 0
    predicted_labels = []
    liver_density = load_raw_liver_density()
    # Batched inference over all test patches.
    while True:
        if start_index >= len(paths):
            break
        print start_index, len(paths)
        end_index = start_index + net_config.BATCH_SIZE
        cur_paths = paths[start_index:end_index]
        cur_roi_images = [np.asarray(load_patch(path)) for path in cur_paths]
        cur_expand_roi_images = [
            np.asarray(
                load_patch(
                    path,
                    return_roi=True,
                    parent_dir=
                    '/home/give/Documents/dataset/MedicalImage/MedicalImage/SL_TrainAndVal/val'
                )) for path in cur_paths
        ]
        cur_roi_images = resize_images(cur_roi_images, net_config.ROI_SIZE_W,
                                       True)
        cur_expand_roi_images = resize_images(cur_expand_roi_images,
                                              net_config.EXPAND_SIZE_W, True)
        # NOTE(review): cur_liver_densitys is only consumed by the
        # commented-out normalization below; currently dead code.
        cur_liver_densitys = [
            liver_density[os.path.basename(path)
                          [:os.path.basename(path).rfind('_')]]
            for path in cur_paths
        ]
        # for i in range(len(cur_roi_images)):
        #     for j in range(3):
        #         cur_roi_images[i, :, :, j] = (1.0 * cur_roi_images[i, :, :, j]) / (1.0 * cur_liver_densitys[i][j])
        #         cur_expand_roi_images[i, :, :, j] = (1.0 * cur_expand_roi_images[i, :, :, j]) / (
        #         1.0 * cur_liver_densitys[i][j])
        predicted_batch_labels = sess.run(predicted_label_tensor,
                                          feed_dict={
                                              roi_images:
                                              cur_roi_images,
                                              expand_roi_images:
                                              cur_expand_roi_images,
                                              is_training_tensor:
                                              False,
                                              batch_size_tensor:
                                              len(cur_roi_images)
                                          })
        batch_labels = labels[start_index:end_index]
        predicted_labels.extend(predicted_batch_labels)
        start_index = end_index
        calculate_acc_error(predicted_batch_labels, batch_labels)
    calculate_acc_error(predicted_labels, labels)
Пример #23
0
def val(val_data_set, load_model_path, phases_names):
    """Run one full-batch validation pass of the two-input multi-phase model.

    Rebuilds the graph (ROI + expanded-ROI inputs, inference, loss,
    accuracy), restores weights from `load_model_path` when given, fetches a
    single batch from `val_data_set`, and prints loss, accuracy, and the
    binary accuracy.

    Args:
        val_data_set: object with get_next_batch() returning
            (images, expanded images, labels).
        load_model_path: checkpoint to restore, or falsy to skip restoring.
        phases_names: phase identifiers; channel count scales with its length.
    """
    # ROI input: channels stacked across all phases.
    x_ROI = tf.placeholder(tf.float32,
                           shape=[
                               None, net_config.ROI_SIZE_W,
                               net_config.ROI_SIZE_H,
                               net_config.IMAGE_CHANNEL * len(phases_names)
                           ],
                           name='input_x')

    # Larger-context input, same channel layout.
    x_EXPAND = tf.placeholder(tf.float32,
                              shape=[
                                  None, net_config.EXPAND_SIZE_W,
                                  net_config.EXPAND_SIZE_H,
                                  net_config.IMAGE_CHANNEL * len(phases_names)
                              ])
    y_ = tf.placeholder(tf.float32, shape=[
        None,
    ])
    tf.summary.histogram('label', y_)
    global_step = tf.Variable(0, trainable=False)
    # variable_average = tf.train.ExponentialMovingAverage(
    #     sub_Config.MOVING_AVERAGE_DECAY,
    #     global_step
    # )
    # vaeriable_average_op = variable_average.apply(tf.trainable_variables())
    # regularizer = tf.contrib.layers.l2_regularizer(sub_Config.REGULARIZTION_RATE)
    is_training = tf.placeholder('bool', [], name='is_training')
    # NOTE(review): defining flags inside the function raises a duplicate-flag
    # error if val() is called twice in one process — confirm single use.
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('data_dir', '/tmp/cifar-data',
                               'where to store the dataset')
    tf.app.flags.DEFINE_boolean(
        'use_bn', True, 'use batch normalization. otherwise use biases')
    # NOTE(review): the 'use_bn' flag is passed as use_bias=; the help text
    # suggests the opposite meaning — confirm against inference_small.
    y = inference_small([x_ROI, x_EXPAND],
                        is_training=is_training,
                        num_classes=net_config.OUTPUT_NODE,
                        use_bias=FLAGS.use_bn,
                        phase_names=phases_names,
                        num_blocks=3)
    tf.summary.histogram('logits', tf.argmax(y, 1))
    loss_ = loss(logits=y, labels=tf.cast(y_, np.int32))
    tf.summary.scalar('loss', loss_)

    with tf.variable_scope('accuracy'):
        # Fraction of predictions matching the (int-cast) labels.
        accuracy_tensor = tf.reduce_mean(
            tf.cast(tf.equal(x=tf.argmax(y, 1), y=tf.cast(y_, tf.int64)),
                    tf.float32))
        tf.summary.scalar('accuracy', accuracy_tensor)
    saver = tf.train.Saver()
    merge_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        if load_model_path:
            saver.restore(sess, load_model_path)

        # Single validation batch (images, expanded images, labels).
        validation_images, validation_images_expand, validation_labels = val_data_set.get_next_batch(
        )

        validation_accuracy, validation_loss, summary, logits = sess.run(
            [accuracy_tensor, loss_, merge_op, y],
            feed_dict={
                x_ROI: validation_images,
                x_EXPAND: validation_images_expand,
                y_: validation_labels
            })
        calculate_acc_error(logits=np.argmax(logits, 1),
                            label=validation_labels,
                            show=True)
        binary_acc = acc_binary_acc(
            logits=np.argmax(logits, 1),
            label=validation_labels,
        )

        print 'validation loss value is %g, accuracy is %g, binary_acc is %g' % \
              (validation_loss, validation_accuracy, binary_acc)
Пример #24
0
 def getModel(self, images):
     """Build the classification graph for `images`.

     Uses the SELU variant when ``self.selu`` is set; otherwise builds
     the small residual network with the instance's class count and
     bias/batch-norm configuration.
     """
     if self.selu:
         return self.buildSeluModel(images)
     return inference_small(images,
                            num_classes=self.numClasses,
                            use_bias=(not self.use_bn),
                            num_blocks=3)