Example #1
import logging
import time

import numpy as np
import SimpleITK as sitk
import tensorflow as tf

# imgmani (multispectral image helpers) and cnn (the network builder) are
# project-local modules; the exact import paths below are assumptions.
import imgmani
from tensorflow_cnn import cnn


def estimate_image_tensorflow(msi, model_checkpoint_dir):
    # estimate parameters
    collapsed_msi = imgmani.collapse_image(msi.get_image())
    # in case of nan/inf values: set to 0 so the network gets finite inputs
    collapsed_msi[np.isnan(collapsed_msi)] = 0.
    collapsed_msi[np.isinf(collapsed_msi)] = 0.


    tf.reset_default_graph()

    keep_prob = tf.placeholder("float")
    nr_wavelengths = len(msi.get_wavelengths())
    x = tf.placeholder("float", [None, nr_wavelengths, 1, 1])

    # feed the cleaned, collapsed image so the nan/inf scrubbing above
    # actually reaches the network
    x_test_image = np.reshape(collapsed_msi, [-1, nr_wavelengths, 1, 1])

    # Construct the desired model
    # pred, regularizers = multilayer_perceptron(x, nr_wavelengths, 100, 1,
    #                                            keep_prob)
    pred = cnn(x, 1, keep_prob)

    # Initializing the variables
    init = tf.global_variables_initializer()

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(init)
        # restore model:
        ckpt = tf.train.get_checkpoint_state(model_checkpoint_dir)

        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

            start = time.time()
            estimated_parameters = pred.eval({x: x_test_image,
                                              keep_prob: 1.0})
            end = time.time()
            estimation_time = end - start
            logging.info("time necessary for estimating image parameters: %ss",
                         estimation_time)
        else:
            # fail loudly instead of falling through with unbound results
            raise IOError("no model checkpoint found in " +
                          model_checkpoint_dir)
    # restore shape
    feature_dimension = 1
    if len(estimated_parameters.shape) > 1:
        feature_dimension = estimated_parameters.shape[-1]

    estimated_parameters_as_image = np.reshape(
            estimated_parameters, (msi.get_image().shape[0],
                                   msi.get_image().shape[1],
                                   feature_dimension))
    # wrap as a SimpleITK image, e.g. for saving as nrrd
    sitk_img = sitk.GetImageFromArray(estimated_parameters_as_image,
                                      isVector=True)

    return sitk_img, estimation_time
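
A minimal usage sketch (hypothetical: the reader helper, file names, and
checkpoint directory are assumptions; only the call contract of
estimate_image_tensorflow is taken from the code above):

import SimpleITK as sitk
import msi_reader  # assumed helper returning an object with get_image()/get_wavelengths()

msi = msi_reader.read("sample_image.nrrd")  # hypothetical reader call
sitk_img, seconds = estimate_image_tensorflow(msi, "./model_checkpoints")
print("parameter estimation took %ss" % seconds)
sitk.WriteImage(sitk_img, "estimated_parameters.nrrd")  # persist as nrrd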
Example #2
    # run() of a (presumably luigi) task: self.input()/self.output() supply
    # file targets for the training/test data and for the saved model
    def run(self):
        # extract data from the batch
        tensorflow_dataset = read_data_set(self.input()[0].path)
        test_dataset = read_data_set(self.input()[1].path)

        # train regressor

        # Network Parameters
        # sc: project-level settings object (assumed) listing the recorded wavelengths
        nr_filters = len(sc.other["RECORDED_WAVELENGTHS"])
        x = tf.placeholder("float", [None, nr_filters, 1, 1])
        keep_prob = tf.placeholder("float")

        # Construct the desired model
        # pred, regularizers = multilayer_perceptron(x, nr_filters, 100, 1,
        #                                            keep_prob)
        pred = cnn(x, 1, keep_prob)
        # define parameters
        learning_rate = 0.0001
        training_epochs = 300
        batch_size = 100
        display_step = 1

        # Define loss and optimizer

        y = tf.placeholder("float", [None, 1])
        cost = tf.reduce_mean(tf.square(pred - y))
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

        # Initializing the variables
        init = tf.global_variables_initializer()

        saver = tf.train.Saver()  # defaults to saving all variables

        # Launch the graph
        with tf.Session() as sess:
            sess.run(init)

            # Training cycle
            for epoch in range(training_epochs):
                avg_cost = 0.
                total_batch = int(tensorflow_dataset.num_examples/batch_size)
                # Loop over all batches
                for i in range(total_batch):
                    batch_xs, batch_ys = tensorflow_dataset.next_batch(batch_size)
                    # Fit training using batch data
                    x_image = np.reshape(batch_xs, [-1, nr_filters, 1, 1])
                    sess.run(optimizer, feed_dict={x: x_image, y: batch_ys,
                                                   keep_prob: 0.75})
                    # Compute average loss
                    avg_cost += sess.run(cost, feed_dict={x: x_image, y: batch_ys,
                                                          keep_prob: 1.0})/total_batch
                # Display logs per epoch step
                if epoch % display_step == 0:
                    print("Epoch: %04d cost=%.9f" % (epoch + 1, avg_cost))

            # Test model: report mean absolute error on the held-out set
            mean_abs_error = tf.reduce_mean(tf.cast(tf.abs(pred - y), "float"))
            x_test_image = np.reshape(test_dataset.images,
                                      [-1, nr_filters, 1, 1])
            print("Mean testing error:",
                  mean_abs_error.eval({x: x_test_image,
                                       y: test_dataset.labels,
                                       keep_prob: 1.0}))

            print("Optimization Finished!")
            saver.save(sess, self.output().path)
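
Both examples call a cnn(x, n_outputs, keep_prob) builder whose definition is
not shown. Below is a minimal compatible stand-in, written against the
TF 1.x layers API; the filter count and layer sizes are assumptions, not the
original architecture. It only honors the shapes the callers expect: input
[None, nr_filters, 1, 1], output [None, n_outputs].

import tensorflow as tf

def cnn(x, n_outputs, keep_prob):
    # convolve along the wavelength axis (kernel spans 3 neighboring bands)
    conv = tf.layers.conv2d(x, filters=32, kernel_size=(3, 1),
                            padding="same", activation=tf.nn.relu)
    flat = tf.layers.flatten(conv)
    hidden = tf.layers.dense(flat, 100, activation=tf.nn.relu)
    # dropout is active during training (keep_prob < 1) and disabled at
    # evaluation time by feeding keep_prob = 1.0, as the callers above do
    hidden = tf.nn.dropout(hidden, keep_prob)
    return tf.layers.dense(hidden, n_outputs)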