Example #1
import numpy as np

# `ps` (trainable-segmentation helpers), `ac` (accuracy metrics) and
# ClusterPercent are imported from the surrounding project.


def test_classifier_per(clf, imask, number, params=None):
    """Tests a classifier trained on a given number of an image's pixels.

    Returns the dice coefficients for each of the ten test images and the
    time taken to train.
    """
    if params is None:
        params = ps.trainableParameters()

    images = imask[0]
    masks = imask[1]

    # train the classifier on `number` pixels of the first image
    _, clf, clock = ClusterPercent(images[0],
                                   masks[0],
                                   clf,
                                   number,
                                   params=params)

    # segment all ten test images with the trained classifier
    output = []
    for i in range(10):
        output.append(ps.ClassifierSegment(clf, images[i], parameters=params))

    alldice = np.zeros((2, 2, 10))
    for i in range(10):
        # remap labels (2 -> 0, 1 -> 1) before scoring
        output[i] = 2 - output[i]
        masks[i] = 2 - masks[i]

        alldice[:, :, i] = ac.dice(output[i], masks[i])

    return alldice, clock
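
A hypothetical usage sketch for this function: the random forest, the image/mask lists and the pixel count below are placeholders, not values from the original project.

# Hypothetical usage: train on 5000 pixels of the first image, then score
# all ten test images; `images` and `masks` are placeholder lists.
from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(n_estimators=100)
imask = (images, masks)
alldice, clock = test_classifier_per(clf, imask, 5000)
print(alldice.mean(axis=2), clock)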
Example #2
import os

import numpy as np
from PIL import Image

# `ps` and `ac` are project modules providing segmentation helpers and
# accuracy metrics respectively.


def test_man_seg(afolder):
    """Compares two sets of manual segmentations of the same images and
    writes the pixel accuracy and dice coefficients to a CSV file."""

    folderm = f"Ground truth/{afolder}/mask"
    folderm2 = f"Ground truth/{afolder}/mask2 EB"

    masks = []
    masks2 = []

    for mask in os.listdir(folderm2):

        maskfile2 = np.asarray(Image.open(os.path.join(folderm2, mask)))
        if afolder == 'AuGe TEM':
            # the first set of AuGe TEM masks are binary .tif files
            maskfile1 = np.asarray(
                Image.open(os.path.join(folderm, mask)[:-3] + 'tif'))
            thing = np.zeros_like(maskfile1)
            thing[maskfile1 == 255] = 1
            maskfile1 = thing

        else:
            # otherwise masks are RGB: decode the colour channels to labels,
            # then binarise
            maskfile1 = np.asarray(Image.open(os.path.join(folderm, mask)))
            maskfile1 = ps.toggle_channels(maskfile1[:, :, :3],
                                           colors=['#0000ff', '#ff0000'])
            maskfile1 = (maskfile1 - 1).astype(np.bool_).astype(int)

        maskfile2 = ps.toggle_channels(maskfile2[:, :, :3],
                                       colors=['#0000ff', '#ff0000'])
        maskfile2 = maskfile2 - 1
        masks.append(maskfile1)
        masks2.append(maskfile2)

    with open(f"Results/manual seg{afolder}.csv", 'w') as data:
        data.write("pixel accuracy,dice01,dice11,dice00,dice10\n")

        for i in range(len(masks)):
            pixacc = ac.check_ground(masks[i], masks2[i])
            dice = ac.dice(masks[i], masks2[i])
            data.write("{},{},{},{},{}\n".format(pixacc, dice[0, 1],
                                                 dice[1, 1], dice[0, 0],
                                                 dice[1, 0]))
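
For reference, a minimal sketch of the Dice coefficient for a pair of binary masks; this assumes `ac.dice` is built from per-class overlaps along these lines, which is why the CSV stores a 2x2 block of values.

import numpy as np

def dice_binary(a, b):
    """Dice coefficient 2|A∩B| / (|A| + |B|) for two binary masks."""
    a = a.astype(bool)
    b = b.astype(bool)
    intersection = np.logical_and(a, b).sum()
    return 2.0 * intersection / (a.sum() + b.sum())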
Example #3
import os
import time

import numpy as np
from PIL import Image
import hyperspy.api as hs

# `ps` and `ac` are project modules providing segmentation helpers and
# accuracy metrics respectively.


def test_classifier_mult(afolder, clf, n=1, parameters=None):
    """Trains a classifier on the first n image/mask pairs in a folder,
    segments the remaining images, and records timings, pixel accuracy
    and dice coefficients in a CSV file."""

    folderi = f"Ground truth/{afolder}/image"
    folderm = f"Ground truth/{afolder}/mask"

    if parameters is None:
        parameters = ps.trainableParameters()

    images = []
    masks = []
    names = []

    for mask in os.listdir(folderm):
        names.append(mask)
        maskfile = np.asarray(Image.open(os.path.join(folderm, mask)))
        if afolder == 'AuGe TEM':
            image = mask[:-7] + ".tiff"

            # binary masks: map 255 -> 1, then shift to labels {1, 2}
            thing = np.zeros_like(maskfile)
            thing[maskfile == 255] = 1
            thing = thing + 1
        else:
            image = mask[:-7] + ".dm4"

            # RGB masks: decode the colour channels into labels
            thing = ps.toggle_channels(maskfile[:, :, :3],
                                       colors=['#0000ff', '#ff0000'])

        imagefile = hs.load(os.path.join(folderi, image))
        im = imagefile.data
        # rescale intensities to the range 0-1000
        normed = (im - im.min()) * (1000 / (im.max() - im.min()))

        images.append(normed)
        masks.append(thing)

    data = open(f"results/dicedata{afolder}{str(clf)}.csv", 'w')
    data.write("pixel accuracy,dice01,dice11,dice00,dice10\n")

    tic = time.perf_counter()
    output, clf = ps.ClusterTrained(images[:n],
                                    masks[:n],
                                    clf,
                                    parameters=parameters)
    toc = time.perf_counter() - tic
    print('trained classifier in {} seconds'.format(toc))
    data.write('{}\n'.format(toc))
    tic = time.perf_counter()

    if n == 1:
        output = [output]
    for i in range(n, len(masks)):
        output.append(
            ps.ClassifierSegment(clf, images[i], parameters=parameters))

    toc = time.perf_counter() - tic
    data.write('{}\n'.format(toc))
    print('classified images in {} seconds'.format(toc))

    for i in range(len(masks)):

        # remap labels (2 -> 0, 1 -> 1) before scoring
        output[i] = 2 - output[i]
        masks[i] = 2 - masks[i]

        # save the segmentation as an image
        tempp = ps.toggle_channels(2 - output[i])
        maskim = Image.fromarray(tempp)
        maskim.save(f'results/{names[i]}')

        pixacc = ac.check_ground(output[i], masks[i])
        print("accuracy: {}".format(pixacc))
        dice = ac.dice(output[i], masks[i])
        print("dice {} {}\n     {} {}".format(dice[0, 1], dice[1, 1],
                                              dice[0, 0], dice[1, 0]))
        data.write("{},{},{},{},{}\n".format(pixacc, dice[0, 1], dice[1, 1],
                                             dice[0, 0], dice[1, 0]))

    data.close()
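
A hypothetical call, reusing the 'AuGe TEM' folder name that appears above; the classifier choice is a placeholder.

from sklearn.ensemble import RandomForestClassifier

# train on the first two image/mask pairs and segment the rest
test_classifier_mult('AuGe TEM', RandomForestClassifier(), n=2)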
Example #4
import os
import time

import numpy as np
from PIL import Image
import hyperspy.api as hs

# `ps` and `ac` are project modules providing segmentation helpers and
# accuracy metrics respectively.


def test_classifier(afolder, clf):
    """Trains a classifier on the first image/mask pair in a folder,
    segments every image, and records timings, pixel accuracy and dice
    coefficients in a CSV file."""

    folderi = f"Ground truth/{afolder}/image"
    folderm = f"Ground truth/{afolder}/mask"

    images = []
    masks = []
    names = []

    for mask in os.listdir(folderm):
        names.append(mask)
        maskfile = np.asarray(Image.open(os.path.join(folderm, mask)))
        if afolder == 'AuGe TEM':
            image = mask[:-7] + ".tiff"
            imagefile = hs.load(os.path.join(folderi, image))
            images.append(imagefile.data)
            # binary masks: map 255 -> 1, then shift to labels {1, 2}
            thing = np.zeros_like(maskfile)
            thing[maskfile == 255] = 1
            thing = thing + 1
        else:
            image = mask[:-5] + ".dm4"
            imagefile = hs.load(os.path.join(folderi, image))
            images.append(imagefile.data)
            # RGB masks: decode the colour channels into labels
            thing = ps.toggle_channels(maskfile[:, :, :3],
                                       colors=['#0000ff', '#ff0000'])

        masks.append(thing)

    data = open(f"results/dicedata{afolder}{str(clf)}.csv", 'w')
    data.write("pixel accuracy,dice01,dice11,dice00,dice10\n")

    for i in range(len(masks)):

        mk = np.copy(masks[i])
        im = np.copy(images[i])

        # train on the first image only
        if i == 0:
            tic = time.perf_counter()
            _, clf = ps.ClusterTrained(
                im,
                mk,
                clf,
                sigma=1,
                high_sigma=16,
                disk_size=20,
            )
            toc = time.perf_counter() - tic
            print('trained classifier in {} seconds'.format(toc))
            data.write('{}\n'.format(toc))
            tic = time.perf_counter()
        output = ps.ClassifierSegment(
            clf,
            im,
            sigma=10,
            high_sigma=16,
            disk_size=20,
        )
        im = ps.toggle_channels(output)
        # remap labels (2 -> 0, 1 -> 1) before scoring
        mk = 2 - mk
        output = 2 - output

        maskim = Image.fromarray(im)
        maskim.save("ims/{}".format(names[i]))

        pixacc = ac.check_ground(output, mk)
        print("accuracy: {}".format(pixacc))
        dice = ac.dice(output, mk)
        print("dice {} {}\n     {} {}".format(dice[0, 1], dice[1, 1],
                                              dice[0, 0], dice[1, 0]))
        data.write("{},{},{},{},{}\n".format(pixacc, dice[0, 1], dice[1, 1],
                                             dice[0, 0], dice[1, 0]))
    toc = time.perf_counter() - tic
    data.write('{}\n'.format(toc))
    print('classified images in {} seconds'.format(toc))
    data.close()
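
A hypothetical call for this single-image-training variant; as above, the classifier is a placeholder. Results land in the results/ CSV and the segmented images in ims/.

from sklearn.ensemble import RandomForestClassifier

test_classifier('AuGe TEM', RandomForestClassifier())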
Example #5
import numpy as np
import tensorflow as tf  # written against the TensorFlow 1.x API

# DataReader, DRN, dice_loss, jaccard and dice are defined elsewhere in
# this project.


def train(config):
  """Trains the model based on configuration settings.

  Args:
    config: configuration object holding the training settings (data paths,
      image dimensions, batch size, number of epochs, loss type, etc.)
  """

  tf.reset_default_graph()

  data = DataReader(config.directory, config.image_dims, config.batch_size, 
                    config.num_epochs, config.use_weights)
  train_data = data.train_batch(config.train_file)
  num_train_images = data.num_images

  test_data = data.test_batch(config.val_file)
  num_val_images = data.num_images

  # determine number of iterations based on number of images
  training_iterations = int(np.floor(num_train_images/config.batch_size))
  validation_iterations = int(np.floor(num_val_images/config.batch_size))

  # create iterators allowing us to switch between datasets
  handle = tf.placeholder(tf.string, shape=[])
  iterator = tf.data.Iterator.from_string_handle(handle, 
    train_data.output_types, train_data.output_shapes)
  next_element = iterator.get_next()
  training_iterator = train_data.make_initializable_iterator()
  val_iterator = test_data.make_initializable_iterator()

  # create placeholder for train or test
  train_network = tf.placeholder(tf.bool, [])

  # get images and pass into network
  image, label, weight = next_element
  drn = DRN(image, config.image_dims, config.batch_size, config.num_classes, 
            train_network, config.network)

  # get predictions and logits
  prediction = drn.pred
  logits = drn.prob
  label = tf.squeeze(label, 3)

  # resize the logits using bilinear interpolation
  imsize = tf.constant([config.image_dims[0], config.image_dims[1]], 
                        dtype=tf.int32)
  logits = tf.image.resize_bilinear(logits, imsize)
  print('Resized shape is {}'.format(logits.get_shape()))

  prediction = tf.argmax(logits, 3)

  if config.loss == 'CE':
    if config.use_weights:
      label_one_hot = tf.one_hot(label, config.num_classes)
      loss = tf.nn.softmax_cross_entropy_with_logits(labels=label_one_hot, 
                                                      logits=logits)
      loss = loss*tf.squeeze(weight, 3)
    else:
      # use sparse with flattened labelmaps
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, 
                                                            logits=logits)
    loss = tf.reduce_mean(loss)
  elif config.loss == 'dice':
    loss = dice_loss(logits, label, config.num_classes, 
                      use_weights=config.use_weights)
  else:
    raise ValueError("config.loss must be 'CE' or 'dice'")

  # global step to keep track of iterations
  global_step = tf.Variable(0, trainable=False, name='global_step')

  # create placeholder for learning rate
  learning_rate = tf.placeholder(tf.float32, shape=[])

  optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step)

  saver = tf.train.Saver(max_to_keep=3)

  init = tf.global_variables_initializer()

  with tf.Session() as sess:
    training_handle = sess.run(training_iterator.string_handle())
    validation_handle = sess.run(val_iterator.string_handle())
    
    sess.run(training_iterator.initializer)
    sess.run(init)
    
    ckpt = tf.train.get_checkpoint_state(config.logs)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path) 
      print('Restoring session at step {}'.format(global_step.eval()))
      
    # if restoring saved checkpoint get last saved iteration so that correct
    # epoch can be restored
    iteration = global_step.eval()
    start_epoch = int(np.floor(iteration/training_iterations)) 
    
    for current_epoch in range(start_epoch, config.num_epochs):

      train_loss = 0
      for i in range(training_iterations):
        _, l = sess.run([optimizer, loss], feed_dict={handle:training_handle, 
          learning_rate:config.learning_rate, train_network:True})
        train_loss += l
        iteration = global_step.eval()

      sess.run(val_iterator.initializer)
      val_loss = 0
      for i in range(validation_iterations):
        l, img, lbl, pred = sess.run([loss, image, label, prediction], 
          feed_dict={handle:validation_handle, train_network:False})
        val_loss += l

        # evaluate accuracy on this batch; only the final batch's scores
        # are kept and reported below
        accuracy = jaccard(lbl, pred, config.num_classes)
        dice_score = dice(lbl, pred, config.num_classes)

      print('Train loss Epoch {} step {}: {}'.format(current_epoch, iteration, 
        train_loss/training_iterations))
      print('Validation loss Epoch {} step {}: {}'.format(current_epoch, iteration, 
        val_loss/validation_iterations))
      print('Validation Jaccard: {} Dice: {}'.format(accuracy, dice_score))

      with open('loss.txt', 'a') as f: 
        f.write("Epoch: {} Step: {} Loss: {}\n".format(current_epoch, iteration, 
          train_loss/training_iterations))

      saver.save(sess, config.logs + '/model.ckpt', global_step)
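
A hypothetical invocation, assuming a plain namespace for the configuration; the field names mirror exactly the attributes `train` reads, while the paths and the network name are placeholders.

from types import SimpleNamespace

config = SimpleNamespace(
    directory='data/',             # placeholder paths
    train_file='train.tfrecords',
    val_file='val.tfrecords',
    logs='logs/',
    image_dims=[512, 512],
    batch_size=4,
    num_epochs=50,
    num_classes=2,
    network='drn',                 # placeholder network identifier
    loss='CE',                     # or 'dice'
    use_weights=False,
    learning_rate=1e-4,
)
train(config)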