Example 1
def get_prediction(checkpoint_path, images):
    """Restore a TF1.x checkpoint and run inference on each image."""
    p = PolypDetectionModel()
    predictions = []
    with tf.Graph().as_default():
        # The model expects batched 227x227 RGB inputs.
        X = tf.placeholder(tf.float32, [None, 227, 227, 3], name="input")
        output_0 = p.get_model(X, training=False)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
            sess.run(init)
            saver.restore(sess, checkpoint_path)
            for image, image_rgb, file_name in images:
                prediction = sess.run(output_0, feed_dict={X: image_rgb})
                predictions.append(prediction)
    return predictions
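
A quick usage sketch for this helper; the checkpoint path matches the other examples here, while load_test_images is a hypothetical loader yielding (image, image_rgb, file_name) tuples with image_rgb already batched to shape (1, 227, 227, 3).

# Hypothetical loader; any iterable of (image, image_rgb, file_name) works.
images = load_test_images("../data/test/")
predictions = get_prediction("../results/model", images)
print("collected {} predictions".format(len(predictions)))
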
Example 2
def __init__(self, **kwargs):
    self.train_image_path = kwargs.get("train_image")
    self.train_label_path = kwargs.get("train_label")
    self.validate = kwargs.get("validate", False)
    self.valid_image_path = kwargs.get("valid_image", None)
    self.valid_label_path = kwargs.get("valid_label", None)
    self.train_dataset = (np.load(self.train_image_path),
                          np.load(self.train_label_path))
    if self.validate:
        # Both the image and the label paths are required for validation.
        if self.valid_image_path is None or self.valid_label_path is None:
            raise AssertionError("Validation dataset paths not given")
        self.val_dataset = (np.load(self.valid_image_path),
                            np.load(self.valid_label_path))
    self.epoch = kwargs.get("epoch", 100)
    self.save_point = kwargs.get("save_point", 50)
    self.validation_point = kwargs.get("validation_point", None)
    self.batch_size = kwargs.get("batch_size", 32)
    self.val_batch_size = kwargs.get("val_batch_size", 32)
    self.learning_rate = kwargs.get("learning_rate", 1e-3)
    self.checkpoint_dir_path = kwargs.get("checkpoint_dir_path",
                                          "results/checkpoint/")
    self.device = kwargs.get("device", "/device:XLA_GPU:0")
    self.model = PolypDetectionModel()
Example 3
def main(_argv):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    model = PolypDetectionModel()
    model.build_model(False)
    model.load_weights(FLAGS.weights)
    eval_model = model.get_model()

    with open(FLAGS.classes) as f:
        class_names = [c.strip() for c in f.readlines()]
    logging.info('classes loaded')

    if FLAGS.tfrecord:
        dataset = load_tfrecord_dataset(FLAGS.tfrecord, FLAGS.classes,
                                        FLAGS.size)
        dataset = dataset.shuffle(512)
        img_raw, _label = next(iter(dataset.take(1)))
    else:
        with open(FLAGS.image, 'rb') as f:
            img_raw = tf.image.decode_image(f.read(), channels=3)

    img = tf.expand_dims(img_raw, 0)
    img = PolypDataset.transform_images(img)

    t1 = time.time()
    boxes, scores, classes, nums = eval_model(img)
    t2 = time.time()
    logging.info('time: {}'.format(t2 - t1))

    logging.info('detections:')
    for i in range(int(nums[0])):
        logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                           np.array(scores[0][i]),
                                           np.array(boxes[0][i])))

    img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
    cv2.imwrite(FLAGS.output, img)
    logging.info('output saved to: {}'.format(FLAGS.output))
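
This main() reads all of its inputs from absl flags, whose definitions are not part of the snippet. A plausible setup (the default values here are assumptions, not the project's actual defaults) would be:

from absl import app, flags, logging

FLAGS = flags.FLAGS
flags.DEFINE_string('classes', './data/polyp.names', 'path to classes file')
flags.DEFINE_string('weights', './checkpoints/polyp_train.tf',
                    'path to weights file')
flags.DEFINE_integer('size', 227, 'resize images to')
flags.DEFINE_string('image', './data/polyp.jpg', 'path to input image')
flags.DEFINE_string('tfrecord', None, 'tfrecord instead of image')
flags.DEFINE_string('output', './output.jpg', 'path to output image')

if __name__ == '__main__':
    app.run(main)
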
Example 4
def convert(model_name, tf_weights_input, pb_weights_output):
    """Load checkpoint weights and export the model as a SavedModel."""
    model = PolypDetectionModel(model_name=model_name)
    model.build_model(True)
    model.load_weights(tf_weights_input)
    tf_model = model.get_model()
    tf.saved_model.save(tf_model, pb_weights_output)
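
A usage sketch; the model name and both paths are hypothetical. Loading the directory back with tf.saved_model.load is a quick way to verify the export:

convert(model_name="polyp",
        tf_weights_input="./checkpoints/polyp_train_100.tf",
        pb_weights_output="./saved_model/")
loaded = tf.saved_model.load("./saved_model/")  # raises if the export is broken
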
Example 5
def main(_argv):
    set_up_gpu()
    train_log_dir = set_up_directories(FLAGS.checkpoint_dir_path,
                                       FLAGS.log_dir_path,
                                       FLAGS.tflite_model_dir_path)
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    train_summary_writer.set_as_default()

    optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)

    model = PolypDetectionModel()
    model.build_model(True)
    training_model = model.get_model()

    # Prepare the training and validation datasets.
    train_dataset, val_dataset = model.get_dataset(
        train_dataset_path=FLAGS.dataset,
        val_dataset_path=FLAGS.val_dataset,
        classes_path=FLAGS.classes)

    step = 0
    if FLAGS.mode == 'eager_tf':
        # Eager mode is great for debugging
        # Non eager graph mode is recommended for real training
        avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
        avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

        for epoch in range(1, FLAGS.epochs + 1):
            for batch, (images, labels) in enumerate(train_dataset):
                tf.summary.experimental.set_step(step)
                step = step + 1

                with tf.GradientTape() as tape:
                    outputs = training_model(images, training=True)
                    pred_loss = model.get_loss(labels, outputs, training=True)
                    total_loss = tf.reduce_sum(pred_loss)

                grads = tape.gradient(total_loss,
                                      training_model.trainable_variables)
                optimizer.apply_gradients(
                    zip(grads, training_model.trainable_variables))
                avg_loss.update_state(total_loss)

            for batch, (images, labels) in enumerate(val_dataset):
                # Validation runs with training=False.
                outputs = training_model(images, training=False)
                pred_loss = model.get_loss(labels, outputs, training=False)
                total_loss = tf.reduce_sum(pred_loss)
                avg_val_loss.update_state(total_loss)

            logging.info(
                "epoch:{}, average train/valid loss per batch: {}/{}".format(
                    epoch,
                    avg_loss.result().numpy(),
                    avg_val_loss.result().numpy()))
            tf.summary.scalar("average train loss per batch",
                              avg_loss.result().numpy())
            tf.summary.scalar("average valid loss per batch",
                              avg_val_loss.result().numpy())
            avg_loss.reset_states()
            avg_val_loss.reset_states()

            if epoch % FLAGS.save_points == 0 or epoch == FLAGS.epochs:
                logging.info(
                    "-----------------------------------------------------------------------------"
                )
                ckpt_filepath = os.path.join(FLAGS.checkpoint_dir_path,
                                             "polyp_train_{}.tf".format(epoch))
                training_model.save_weights(ckpt_filepath)
                converter = tf.lite.TFLiteConverter.from_keras_model(
                    training_model)
                tflite_model = converter.convert()
                tflite_file_path = os.path.join(FLAGS.tflite_model_dir_path,
                                                "model{}.tflite".format(epoch))
                with open(tflite_file_path, "wb") as f:
                    f.write(tflite_model)
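
One of the exported .tflite files can be sanity-checked with the standard tf.lite.Interpreter workflow; the file name below is hypothetical and the (1, 227, 227, 3) input shape is assumed from the other examples in this collection.

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="results/tflite/model100.tflite")
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

dummy = np.random.rand(1, 227, 227, 3).astype(np.float32)  # assumed input shape
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]['index']).shape)
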
Example 6
import tensorflow as tf
from tensorflow.python.framework.graph_util import convert_variables_to_constants
from Models.models import PolypDetectionModel

p = PolypDetectionModel()
checkpoint_path = "../results/model"
with tf.Graph().as_default():
    X = tf.placeholder(tf.float32, [None, 227, 227, 3], name="input")
    output_0 = p.get_model(X, training=False)
    saver = tf.train.Saver()
    init = tf.global_variables_initializer()
    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        sess.run(init)
        saver.restore(sess, checkpoint_path)
        # Fold the restored variables into constants so the exported graph
        # is self-contained, keeping only the subgraph feeding "output".
        minimal_graph = convert_variables_to_constants(sess, sess.graph_def,
                                                       ["output"])
        tf.io.write_graph(minimal_graph,
                          '.',
                          '../results/model.pb',
                          as_text=False)
        tf.io.write_graph(minimal_graph,
                          '.',
                          '../results/model.txt',
                          as_text=True)
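
A sketch of reloading the frozen graph for inference, assuming the "input" and "output" node names used above (TF1.x API):

import numpy as np

with tf.gfile.GFile("../results/model.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    X = graph.get_tensor_by_name("input:0")
    output = graph.get_tensor_by_name("output:0")
    with tf.Session(graph=graph) as sess:
        dummy = np.random.rand(1, 227, 227, 3).astype(np.float32)
        print(sess.run(output, feed_dict={X: dummy}).shape)
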
Example 7
def main(_argv):
    checkpoint_path = "../results/model"
    test_image_path = "../data/test_image.npy"
    test_label_path = "../data/test_label.npy"
    test_image = np.load(test_image_path)
    test_label = np.load(test_label_path)

    polyp_images_dict = {}
    for iimg, (image, label) in enumerate(zip(test_image, test_label)):
        properties = {}
        x1y1, x2y2 = label[:2], label[2:4]
        properties["image_rgb"] = [image]
        properties["ground_truth"] = [x1y1, x2y2]
        properties["score"] = None
        properties["bndbox"] = None
        polyp_images_dict[iimg] = properties
    # TODO: images without any polyp should also be included in the evaluation.

    p = PolypDetectionModel()
    with tf.Graph().as_default():
        X = tf.placeholder(tf.float32, [None, 227, 227, 3], name="input")
        output_0 = p.get_model(X, training=False)
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
            sess.run(init)
            saver.restore(sess, checkpoint_path)
            for image in polyp_images_dict:
                prediction = sess.run(
                    output_0,
                    feed_dict={X: polyp_images_dict[image]["image_rgb"]})
                boxes, objectness, classes, nums = prediction
                coordinates = get_bounding_box(boxes)
                if coordinates is not None:
                    polyp_images_dict[image]["bndbox"] = [coordinates[0],
                                                          coordinates[1]]
                polyp_images_dict[image]["score"] = objectness[0][0]

    for image in polyp_images_dict:
        # Important: only the single bounding box output by the DNN is
        # considered here.
        properties = polyp_images_dict[image]
        bndbox = properties["bndbox"]
        if bndbox is not None:
            ground_truth = properties["ground_truth"]
            ground_truth = tuple(ground_truth[0]) + tuple(ground_truth[1])
            bndbox = bndbox[0] + bndbox[1]
            properties["iou"] = iou(ground_truth, bndbox)
        else:
            properties["iou"] = 0

    tmp_ax = None
    for iou_threshold in np.arange(0.1, 0.5, 0.05):
        # The AP (average precision) is calculated in this part.
        recall = 0.0
        precision = 1.0
        sumArea = 0.0
        precisionList = []
        recallList = []
        for scoreThreshold in np.arange(0.98, -0.1, -0.1):  # score is the objectness probability
            TN = 0  # True negatives (stay 0 here: every test image contains a polyp)
            FP = 0  # False positives
            TP = 0  # True positives
            FN = 0  # False negatives
            for image in polyp_images_dict:
                properties = polyp_images_dict[image]
                score = properties["score"]
                if score >= scoreThreshold:
                    iouRs = properties["iou"]
                    if iouRs > iou_threshold:
                        TP += 1
                    else:
                        FP += 1
                else:
                    # A below-threshold score on a polyp image is a miss.
                    FN += 1

            previousRecall = recall
            previousPrecision = precision
            if (TP + FP) == 0:
                precision = 0
            else:
                precision = TP / (TP + FP)
            recall = TP / (TP + FN)

            # Trapezoidal slice of the precision-recall curve; the running
            # sumArea is the average precision (AP) we want.
            deltaRecall = np.fabs(recall - previousRecall)
            deltaArea = deltaRecall * (precision + previousPrecision) / 2.0
            sumArea = sumArea + deltaArea
            print("sumArea:{:.3f}, precision:{:.3f}, recall:{:.3f}, "
                  "deltaRecall:{:.3f}".format(sumArea, precision, recall,
                                              deltaRecall))

            precisionList.append(precision)
            recallList.append(recall)
        precision_recall_df = pd.DataFrame({'precision': precisionList,
                                            'recall': recallList})
        if tmp_ax is None:
            tmp_ax = precision_recall_df.plot(kind='line', x='recall',
                                              y='precision')
        else:
            precision_recall_df.plot(kind='line', x='recall', y='precision',
                                     ax=tmp_ax)
    threshold_label = np.arange(0.1, 0.5, 0.05)
    threshold_label = ['{:.2f}'.format(x) for x in threshold_label]
    tmp_ax.legend(threshold_label)
    plt.ylabel("Precision", fontsize=20)
    plt.xlabel("Recall", fontsize=20)
    plt.show()
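
The iou() helper used above is not shown in the snippet; a standard corner-format implementation (boxes as (x1, y1, x2, y2) tuples) would look like this sketch:

def iou(box_a, box_b):
    # Corners of the intersection rectangle.
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0
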
Example 8
class Trainer:
    def __init__(self, **kwargs):
        self.train_image_path = kwargs.get("train_image")
        self.train_label_path = kwargs.get("train_label")
        self.validate = kwargs.get("validate", False)
        self.valid_image_path = kwargs.get("valid_image", None)
        self.valid_label_path = kwargs.get("valid_label", None)
        self.train_dataset = (np.load(self.train_image_path),
                              np.load(self.train_label_path))
        if self.validate:
            # Both the image and the label paths are required for validation.
            if self.valid_image_path is None or self.valid_label_path is None:
                raise AssertionError("Validation dataset paths not given")
            self.val_dataset = (np.load(self.valid_image_path),
                                np.load(self.valid_label_path))
        self.epoch = kwargs.get("epoch", 100)
        self.save_point = kwargs.get("save_point", 50)
        self.validation_point = kwargs.get("validation_point", None)
        self.batch_size = kwargs.get('batch_size', 32)
        self.val_batch_size = kwargs.get('val_batch_size', 32)
        self.learning_rate = kwargs.get("learning_rate", 1e-3)
        self.checkpoint_dir_path = kwargs.get("checkpoint_dir_path",
                                              "results/checkpoint/")
        self.device = kwargs.get("device", "/device:XLA_GPU:0")
        self.model = PolypDetectionModel()

    def train(self):
        train_size = len(self.train_dataset[0])
        # Clamp batch sizes to the dataset size.
        self.batch_size = min(self.batch_size, train_size)
        self.val_batch_size = min(self.val_batch_size, train_size)
        batch_x, batch_y = None, None
        cur_loss, cur_val_loss = None, None
        lowest_loss = float('inf')
        with tf.device(self.device):
            with tf.Graph().as_default():
                X = tf.placeholder(tf.float32, [None, 227, 227, 3],
                                   name="input")
                Y = tf.placeholder(tf.float32, [None, 4, 4, 5], name="output")
                output_0 = self.model.get_model(X, training=True)
                loss = self.model.get_loss(y_true=Y,
                                           y_pred=output_0,
                                           train_state=True)
                global_step = tf.train.get_or_create_global_step()
                train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(
                    loss, global_step=global_step)
                summary_op = tf.summary.merge_all()
                images = self.train_dataset[0]
                labels = self.train_dataset[1]
                if self.validate:
                    val_images = self.val_dataset[0]
                    val_labels = self.val_dataset[1]
                    val_size = len(val_images)
                init = tf.global_variables_initializer()
                saver = tf.train.Saver()
                with tf.Session(config=tf.ConfigProto(
                        log_device_placement=True)) as sess:
                    writer = tf.summary.FileWriter("../summary/", sess.graph)
                    sess.run(init)
                    cur_loss = 0
                    for i in range(1, self.epoch + 1):
                        # if lowest_loss < 20 and self.learning_rate > 1e-4:
                        #     self.learning_rate = 1e-4
                        #     train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss,
                        #                                                                    global_step=global_step)
                        batch_epoch_size = len(images) // self.batch_size
                        for b_num in range(batch_epoch_size):
                            offset = b_num * self.batch_size
                            if offset + self.batch_size < train_size:
                                batch_x, batch_y = images[offset:(
                                    offset + self.batch_size)], labels[offset:(
                                        offset + self.batch_size)]
                            else:
                                batch_x, batch_y = images[offset:], labels[
                                    offset:]
                            _, cur_loss, summary = sess.run(
                                [train_op, loss, summary_op],
                                feed_dict={
                                    X: batch_x,
                                    Y: batch_y
                                })
                        # Log the last summary of the epoch to TensorBoard.
                        writer.add_summary(summary, i)
                        print("training loss : ", i, cur_loss)
                        if self.validate:
                            if (self.validation_point is not None
                                    and i % self.validation_point == 0):
                                for b_num in range(
                                        int(
                                            len(val_images) /
                                            self.val_batch_size)):
                                    val_offset = (
                                        b_num * self.val_batch_size
                                    ) % (val_size - 1) if val_size > 1 else 0
                                    val_batch_x = val_images[val_offset:(
                                        val_offset + self.val_batch_size)]
                                    val_batch_y = val_labels[val_offset:(
                                        val_offset + self.val_batch_size)]
                                    cur_val_loss, val_summary = sess.run(
                                        [loss, summary_op],
                                        feed_dict={
                                            X: val_batch_x,
                                            Y: val_batch_y
                                        })
                                if cur_val_loss < lowest_loss:
                                    lowest_loss = cur_val_loss
                                    print("current lowest validation loss : ",
                                          i, cur_val_loss)
                                    saver.save(
                                        sess,
                                        os.path.join(self.checkpoint_dir_path,
                                                     "model"))
                        else:
                            if cur_loss < lowest_loss:
                                lowest_loss = cur_loss
                                saver.save(
                                    sess,
                                    os.path.join(self.checkpoint_dir_path,
                                                 "model"))

                        if i % self.save_point == 0 or i == self.epoch:
                            saver.save(
                                sess,
                                os.path.join(self.checkpoint_dir_path,
                                             "model"), i)