Example #1
def train(loader, optimizer, model, epochs=5, batch_size=2, show_loss=False, augmenter=False, lr=None, init_lr=2e-4,
          saver=None, variables_to_optimize=None, evaluation=True, name_best_model = 'weights/best', preprocess_mode=None, aux_loss=False):
    training_samples = len(loader.image_train_list)
    steps_per_epoch = int(training_samples / batch_size) + 1
    best_miou = 0

    for epoch in range(epochs):  # for each epoch
        lr_decay(lr, init_lr, 1e-9, epoch, epochs - 1)  # compute the new lr
        print('epoch: ' + str(epoch) + '. Learning rate: ' + str(lr.numpy()))
        for step in range(steps_per_epoch):  # for every batch
            with tf.GradientTape() as g:
                # get batch
                x, y, mask = loader.get_batch(size=batch_size, train=True, augmenter=augmenter)
                x = preprocess(x, mode=preprocess_mode)
                [x, y, mask] = convert_to_tensors([x, y, mask])
                if aux_loss:
                    y_, aux_y_ = model(x, training=True, aux_loss=aux_loss)  # get output of the model
                    loss = tf.losses.softmax_cross_entropy(y, y_, weights=mask) +\
                           tf.losses.softmax_cross_entropy(y, aux_y_, weights=mask)  # compute loss
                else:
                    y_ = model(x, training=True, aux_loss=aux_loss)  # get output of the model
                    loss = tf.losses.softmax_cross_entropy(y, y_, weights=mask)  # compute loss


                if show_loss: print('Training loss: ' + str(loss.numpy()))

            # Gets gradients and applies them
            grads = g.gradient(loss, variables_to_optimize)
            optimizer.apply_gradients(zip(grads, variables_to_optimize))

        if evaluation:
            # get metrics
            train_acc, train_miou = get_metrics(loader, model, loader.n_classes, train=True, preprocess_mode=preprocess_mode)
            test_acc, test_miou = get_metrics(loader, model, loader.n_classes, train=False, flip_inference=False,
                                              scales=[1], preprocess_mode=preprocess_mode)

            print('Train accuracy: ' + str(train_acc.numpy()))
            print('Train miou: ' + str(train_miou))
            print('Test accuracy: ' + str(test_acc.numpy()))
            print('Test miou: ' + str(test_miou))
            print('')

            # save the model if it is the best so far
            if test_miou > best_miou:
                best_miou = test_miou
                saver.save(name_best_model)
        else:
            saver.save(name_best_model)

        loader.suffle_segmentation()  # shuffle the training set
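The lr_decay helper called above is not shown in this snippet. A minimal sketch of what the call site implies, assuming a polynomial decay from init_lr down to the 1e-9 floor (the real implementation may differ; the power parameter is an assumption):

def lr_decay(lr_variable, init_lr, end_lr, epoch, total_epochs, power=0.9):
    # Polynomially anneal from init_lr toward end_lr and assign the new
    # value to the learning-rate tf.Variable in place.
    decay = (1.0 - epoch / float(max(total_epochs, 1))) ** power
    lr_variable.assign(end_lr + (init_lr - end_lr) * decay)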
Example #2
    def predict_cascade(self, mode_ds='test_ds'):

        # TODO: Hardcoding
        labels_binary = {"REST": 0, "NV": 1}
        labels_multi6 = {
            "MEL": 0,
            "NV": 1,
            "BCC": 2,
            "AKIEC": 3,
            "BKL": 4,
            "DF": 5,
            "VASC": 6
        }
        labels_binary_inv = {v: k for k, v in labels_binary.items()}
        labels_multi6_inv = {v: k for k, v in labels_multi6.items()}

        self.config.exp_cascade_name = 'binary'
        pkl_path = os.path.join(self.config.output_path, self.config.exp_name,
                                self.config.exp_cascade_name, 'pkl')
        with open(os.path.join(pkl_path, 'labels_gt.pkl'), 'rb') as handle:
            labels_gt_binary = pickle.load(handle)
        with open(os.path.join(pkl_path, 'labels_pred_prob.pkl'),
                  'rb') as handle:
            labels_pred_prob_binary = pickle.load(handle)
        with open(os.path.join(pkl_path, 'labels_pred_cls.pkl'),
                  'rb') as handle:
            labels_pred_cls_binary = pickle.load(handle)

        self.config.exp_cascade_name = 'multi6'
        pkl_path = os.path.join(self.config.output_path, self.config.exp_name,
                                self.config.exp_cascade_name, 'pkl')
        with open(os.path.join(pkl_path, 'labels_gt.pkl'), 'rb') as handle:
            labels_gt_multi6 = pickle.load(handle)
        with open(os.path.join(pkl_path, 'labels_pred_prob.pkl'),
                  'rb') as handle:
            labels_pred_prob_multi6 = pickle.load(handle)
        with open(os.path.join(pkl_path, 'labels_pred_cls.pkl'),
                  'rb') as handle:
            labels_pred_cls_multi6 = pickle.load(handle)

        labels_pred_prob = []
        labels_pred_cls = []
        labels_gt = labels_gt_binary

        # for lgb, lpb, lcb, lgm, lpm, lcm in zip(labels_gt_binary, labels_pred_prob_binary, labels_pred_cls_binary, labels_gt_multi6, labels_pred_prob_multi6, labels_pred_cls_multi6):

        for i in range(len(labels_gt_binary)):
            # binary
            #if (labels_pred_cls_binary[i] == labels_binary['NV']) and (np.max(labels_pred_prob_binary[i]) > 0.5):
            if (labels_pred_cls_binary[i] == labels_binary['NV']):
                labels_pred_cls.append(labels_pred_cls_binary[i])

                # Build a 7-class distribution: split the binary REST
                # probability evenly across the six non-NV classes.
                prob = 7 * [
                    labels_pred_prob_binary[i][labels_binary['REST']] / 6.0
                ]
                prob[labels_binary['NV']] = labels_pred_prob_binary[i][
                    labels_binary['NV']]
                labels_pred_prob.append(prob)
                #labels_pred_prob.append(np.max(labels_pred_prob_binary[i]))

            # multi6
            else:
                labels_pred_cls.append(labels_pred_cls_multi6[i])
                labels_pred_prob.append(labels_pred_prob_multi6[i])

        ## Plot ROC curve
        #utils.gen_roc_curve(self.config, labels_gt, labels_pred_prob, mode_ds)

        ## Plot PR Curve
        utils.gen_precision_recall_curve(self.config, labels_pred_cls,
                                         labels_pred_prob, mode_ds)

        ## Confusion matrix
        # confusion = tf.confusion_matrix(labels=labels_gt, predictions=labels_pred_cls, num_classes=self.config.num_classes)
        # logging.debug('Row(GT), Col(Pred)')
        # with tf.Session() as sess:
        #     print(sess.run(confusion))

        ## Plot and save confusion matrix
        utils.get_confusion_matrix(self.config, labels_gt, labels_pred_cls,
                                   mode_ds)

        ## Print PR and F1
        utils.summary_pr_fscore(self.config, labels_gt, labels_pred_cls,
                                self.config.labels)

        ## Plot Metrics
        utils.get_metrics(self.config, labels_gt, labels_pred_cls, mode_ds)
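The redistribution in the NV branch can be checked with a small worked example (the numbers are hypothetical): if the binary stage outputs [REST, NV] = [0.3, 0.7], the 7-class vector keeps 0.7 for NV and splits 0.3 evenly across the other six classes:

prob_binary = [0.3, 0.7]            # hypothetical [REST, NV] output
prob = 7 * [prob_binary[0] / 6.0]   # every class starts at 0.3 / 6 = 0.05
prob[1] = prob_binary[1]            # index 1 is NV in labels_multi6
assert abs(sum(prob) - 1.0) < 1e-9  # still a valid distribution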
Example #3
    model = Segception.Efficient(num_classes=n_classes, weights='imagenet', input_shape=(None, None, channels))

    # optimizer
    learning_rate = tfe.Variable(lr)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    # Init models (optional, just for get_params function)
    init_model(model, input_shape=(batch_size, width, height, channels))

    variables_to_restore = model.variables
    variables_to_save = model.variables
    variables_to_optimize = model.variables

    # Init saver. Could also use ckpt = tfe.Checkpoint(model=model, optimizer=optimizer, learning_rate=learning_rate, global_step=global_step)
    saver_model = tfe.Saver(var_list=variables_to_save)
    restore_model = tfe.Saver(var_list=variables_to_restore)

    # restore if model saved and show number of params
    restore_state(restore_model, name_best_model)
    get_params(model)

    train(loader=loader, optimizer=optimizer, model=model, epochs=epochs, batch_size=batch_size, augmenter='segmentation', lr=learning_rate,
          init_lr=lr, saver=saver_model, variables_to_optimize=variables_to_optimize, name_best_model=name_best_model,
          evaluation=True, aux_loss=False, preprocess_mode=preprocess_mode)

    # Test best model
    print('Testing model')
    test_acc, test_miou = get_metrics(loader, model, loader.n_classes, train=False, flip_inference=True, scales=[0.75,  1, 1.5],
                                      write_images=True, preprocess_mode=preprocess_mode, time_exect=False)
    print('Test accuracy: ' + str(test_acc.numpy()))
    print('Test miou: ' + str(test_miou))
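init_model is not defined in this snippet; given the comment that it exists only so get_params can count parameters, a plausible sketch is a single dummy forward pass that forces the model to build its variables:

import tensorflow as tf

def init_model(model, input_shape):
    # One forward pass on zeros so the subclassed model creates its weights.
    model(tf.zeros(input_shape), training=False)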
Example #4
    def predict(self, mode_ds='test_ds', mixed=False):

        if mode_ds == 'train_ds':
            images_path = utils_image.get_images_path_list_from_dir(
                self.config.tfrecords_path_train, img_format='jpg')
        elif mode_ds == 'val_ds':
            images_path = utils_image.get_images_path_list_from_dir(
                self.config.tfrecords_path_val, img_format='jpg')
        elif mode_ds == 'test_ds':
            images_path = utils_image.get_images_path_list_from_dir(
                self.config.tfrecords_path_test, img_format='jpg')
        else:
            logging.error('Unknown mode_ds {}'.format(mode_ds))
            exit(1)

        ## Get image-label mapping
        image_label_dict = {}
        dataset_labels_file_path = 'datasets/densenet/ISIC2018_Task3_Training_GroundTruth.csv'
        with open(dataset_labels_file_path) as csvfile:
            read_csv = csv.reader(csvfile, delimiter=',')
            for index, row in enumerate(read_csv):
                ## Skip header
                if index == 0:
                    continue
                label_one_hot_encoding = [
                    int(round(float(row[i + 1]), 0)) for i in range(7)
                ]
                image_label_dict[row[0]] = np.argmax(label_one_hot_encoding)

        ## Sample n images
        # random.shuffle(images_path)
        images_path = images_path[0:self.config.predict_num_images]

        ## Get labels_gt
        labels_gt = []
        for image_path in images_path:
            # TODO: Image name should have no dot
            # image_name = os.path.basename(image_path).split('.', 1)[0]
            image_name = os.path.basename(image_path).rsplit('.', 1)[0]
            labels_gt.append(image_label_dict[image_name])

        images = []
        for image_path in images_path:

            ## Load image
            image = Image.open(image_path)

            ## Resize and center crop image. size: (width, height)
            image = ImageOps.fit(
                image,
                (self.config.tfr_image_width, self.config.tfr_image_height),
                Image.LANCZOS, 0, (0.5, 0.5))

            ## Preprocess images
            image = np.float32(np.array(image))
            image = self.data.preprocess_data(image)

            images.append(image)

        features = np.array(images)
        logging.debug('model_name {}'.format(self.config.model_name))
        logging.debug('features {}'.format(features.shape))

        ## Predict in batches
        num_images = len(images_path)
        batch_size = self.config.batch_size_pred
        iters = int(num_images / batch_size)
        logging.debug('num_images {}'.format(num_images))
        logging.debug('batch_size {}'.format(batch_size))
        labels_pred_cls = []
        labels_pred_prob = []

        idx_start = 0
        idx_end = 0
        for iter_no in range(iters):
            idx_start = iter_no * batch_size
            idx_end = idx_start + batch_size
            logging.debug('idx:[{}-{}]'.format(idx_start, idx_end))

            labels_pred_prob_batch, labels_pred_cls_batch = self.sess.run(
                [
                    self.model.labels_pred_prob,
                    self.model.labels_pred_cls,
                ],
                feed_dict={
                    self.model.features: features[idx_start:idx_end],
                })

            logging.debug('labels_gt             {}'.format(
                np.array(labels_gt[idx_start:idx_end])))
            logging.debug(
                'labels_pred_cls_batch {}'.format(labels_pred_cls_batch))
            # logging.debug('labels_pred_prob_batch {}'.format(labels_pred_prob_batch))

            labels_pred_cls = labels_pred_cls + labels_pred_cls_batch.tolist()
            labels_pred_prob = labels_pred_prob + labels_pred_prob_batch.tolist()

        ## For images < batch_size and For images which do not fit the last batch
        idx_start = iters * batch_size
        idx_end = idx_start + (num_images % batch_size)
        logging.debug('idx:[{}-{}]'.format(idx_start, idx_end))
        if (num_images % batch_size):
            labels_pred_prob_batch, labels_pred_cls_batch = self.sess.run(
                [
                    self.model.labels_pred_prob,
                    self.model.labels_pred_cls,
                ],
                feed_dict={
                    self.model.features: features[idx_start:idx_end],
                })
            logging.debug('labels_gt             {}'.format(
                labels_gt[idx_start:idx_end]))
            logging.debug(
                'labels_pred_cls_batch {}'.format(labels_pred_cls_batch))
            # logging.debug('labels_pred_prob_batch {}'.format(labels_pred_prob_batch))

            labels_pred_cls = labels_pred_cls + labels_pred_cls_batch.tolist()
            labels_pred_prob = labels_pred_prob + labels_pred_prob_batch.tolist()

        for label_gt, label_pred_cls in zip(labels_gt, labels_pred_cls):
            print('GT, PRED: [{}, {}]'.format(label_gt, label_pred_cls))

        pkl_path = os.path.join(self.config.output_path, self.config.exp_name,
                                self.config.exp_cascade_name, 'pkl')
        os.makedirs(pkl_path, exist_ok=True)

        with open(os.path.join(pkl_path, 'labels_gt.pkl'), 'wb') as handle:
            pickle.dump(labels_gt, handle, protocol=pickle.HIGHEST_PROTOCOL)
        with open(os.path.join(pkl_path, 'labels_pred_prob.pkl'),
                  'wb') as handle:
            pickle.dump(labels_pred_prob,
                        handle,
                        protocol=pickle.HIGHEST_PROTOCOL)
        with open(os.path.join(pkl_path, 'labels_pred_cls.pkl'),
                  'wb') as handle:
            pickle.dump(labels_pred_cls,
                        handle,
                        protocol=pickle.HIGHEST_PROTOCOL)

        ### ANALYSIS ###

        ## Plot ROC curve
        utils.gen_roc_curve(self.config, labels_gt, labels_pred_prob, mode_ds)

        ## Plot PR Curve
        utils.gen_precision_recall_curve(self.config, labels_pred_cls,
                                         labels_pred_prob, mode_ds)

        ## Confusion matrix
        # confusion = tf.confusion_matrix(labels=labels_gt, predictions=labels_pred_cls, num_classes=self.config.num_classes)
        # logging.debug('Row(GT), Col(Pred)')
        # with tf.Session() as sess:
        #     print(sess.run(confusion))

        ## Plot and save confusion matrix
        utils.get_confusion_matrix(self.config, labels_gt, labels_pred_cls,
                                   mode_ds)

        ## Print PR and F1
        utils.summary_pr_fscore(self.config, labels_gt, labels_pred_cls,
                                self.config.labels)

        ## Plot Metrics
        utils.get_metrics(self.config, labels_gt, labels_pred_cls, mode_ds)
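The main prediction loop and the trailing remainder block above are near-duplicates; the same index pairs can be generated once with ceil division. A sketch (batch_slices is a hypothetical helper, not part of the original code):

import math

def batch_slices(num_items, batch_size):
    # Yield (start, end) pairs covering every item; the last batch may be short.
    for i in range(math.ceil(num_items / batch_size)):
        yield i * batch_size, min((i + 1) * batch_size, num_items)

# e.g. list(batch_slices(10, 4)) == [(0, 4), (4, 8), (8, 10)]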
Example #5
def _train(loader,
           optimizer,
           loss_function,
           model,
           config=None,
           lr=None,
           evaluation=True,
           name_best_model='weights/best',
           preprocess_mode=None):
    # Parameters for training
    training_samples = len(loader.image_train_list)
    steps_per_epoch = int(training_samples / config['batch_size']) + 1
    best_miou = 0
    log_freq = max(1, min(50, int(steps_per_epoch / 5)))
    avg_loss = tf.keras.metrics.Mean(name='loss', dtype=tf.float32)
    train_summary_writer = tf.summary.create_file_writer(
        '/tmp/summaries/train')  # tensorboard
    test_summary_writer = tf.summary.create_file_writer(
        '/tmp/summaries/test')  # tensorboard
    print('Please enter in terminal: tensorboard --logdir /tmp/summaries')

    for epoch in range(config['epochs']):  # for each epoch
        start_time_epoch = time.time()
        lr_decay(lr, config['init_lr'], 1e-9, epoch,
                 config['epochs'] - 1)  # compute the new lr
        print('epoch: ' + str(epoch + 1) + '. Learning rate: ' +
              str(lr.numpy()))

        for step in range(steps_per_epoch):  # for every batch

            # get batch
            x, y, mask = loader.get_batch(size=config['batch_size'],
                                          train=True)

            x = preprocess(x, mode=preprocess_mode)

            with train_summary_writer.as_default():
                loss = train_step(
                    model, x, y, mask, loss_function, optimizer,
                    (config['height_train'], config['width_train']),
                    config['zoom_augmentation'])
                # tensorboard
                avg_loss.update_state(loss)
                if tf.equal(optimizer.iterations % log_freq, 0):
                    tf.summary.scalar('loss',
                                      avg_loss.result(),
                                      step=optimizer.iterations)
                    avg_loss.reset_states()

        if evaluation:
            # get metrics

            # with train_summary_writer.as_default():
            #     train_acc, train_miou = get_metrics(loader, model, loader.n_classes, train=True, flip_inference=False, preprocess_mode=preprocess_mode, optimizer=optimizer)

            with test_summary_writer.as_default():
                test_acc, test_miou = get_metrics(
                    loader,
                    model,
                    loader.n_classes,
                    train=False,
                    flip_inference=False,
                    preprocess_mode=preprocess_mode,
                    optimizer=optimizer,
                    scales=[1])

            # print('Train accuracy: ' + str(train_acc.numpy()))
            # print('Train miou: ' + str(train_miou.numpy()))
            print('Test accuracy: ' + str(test_acc.numpy()))
            print('Test miou: ' + str(test_miou.numpy()))

            # save model if best model
            if test_miou.numpy() > best_miou:
                best_miou = test_miou.numpy()
                model.save_weights(name_best_model)

            print('Current Best model miou: ' + str(best_miou))
            print('')

        else:
            model.save_weights(name_best_model)

        loader.suffle_segmentation()  # shuffle the training set every epoch
        print('Epoch time seconds: ' + str(time.time() - start_time_epoch))
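train_step is not defined in this snippet. A minimal sketch of what the call implies, assuming a masked forward/backward pass with gradient application; the random zoom/crop to (height_train, width_train) suggested by the last two arguments is omitted:

import tensorflow as tf

@tf.function
def train_step(model, x, y, mask, loss_function, optimizer, size_input,
               zoom_augmentation):
    # size_input and zoom_augmentation presumably drive a random zoom/crop
    # augmentation step that this sketch leaves out.
    with tf.GradientTape() as tape:
        y_pred = model(x, training=True)
        loss = loss_function(y, y_pred, sample_weight=mask)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss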
Example #6
def train(n_classes=11,
          batch_size=16,
          epochs=100,
          width=960,
          height=720,
          crop_factor_x=2,
          crop_factor_y=1.25,
          init_lr=1e-4,
          median_frequency=.15,
          zoom_augmentation=.2,
          dataset_path='datasets/endoscopy',
          weights_path='weights/endoscopy/model',
          preprocess='imagenet'):

    CONFIG = {
        'n_classes': n_classes,
        'batch_size': batch_size,
        'epochs': epochs,
        'width': width,
        'height': height,
        'crop_factor_x': crop_factor_x,
        'crop_factor_y': crop_factor_y,
        # training sizes are cropped from the test (width, height) sizes
        'width_train': int(width / crop_factor_x),
        'height_train': int(height / crop_factor_y),
        'init_lr': init_lr,
        'median_frequency': median_frequency,
        'zoom_augmentation': zoom_augmentation,
        'dataset_path': dataset_path,
        'weights_path': weights_path,
        'preprocess': preprocess,
    }

    assert CONFIG['width'] * (
        1 - CONFIG['zoom_augmentation']) >= CONFIG['width_train']
    assert CONFIG['height'] * (
        1 - CONFIG['zoom_augmentation']) >= CONFIG['height_train']

    # GPU to use
    n_gpu = 0
    os.environ["CUDA_VISIBLE_DEVICES"] = str(n_gpu)
    # Loader
    loader = Loader.Loader(dataFolderPath=CONFIG['dataset_path'],
                           n_classes=CONFIG['n_classes'],
                           width=CONFIG['width'],
                           height=CONFIG['height'],
                           median_frequency=CONFIG['median_frequency'])
    print('Dataset loaded...')
    # build model
    #model = MiniNetv2.MiniNetv2p(num_classes=CONFIG['n_classes'])
    model = ResNet50.ResNet50Seg(CONFIG['n_classes'],
                                 input_shape=(None, None, 3),
                                 weights='imagenet')

    # optimizer
    learning_rate = tf.Variable(CONFIG['init_lr'])
    optimizer = tf.keras.optimizers.Adam(learning_rate)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    # restore if model saved and show number of params
    restore_state(model, CONFIG['weights_path'])

    init_model(model, (1, CONFIG['width'], CONFIG['height'], 3))
    get_params(model)

    # Train
    print('Training...')
    _train(loader=loader,
           optimizer=optimizer,
           loss_function=loss_function,
           model=model,
           config=CONFIG,
           lr=learning_rate,
           name_best_model=CONFIG['weights_path'],
           evaluation=True,
           preprocess_mode=CONFIG['preprocess'])

    print('Testing model')
    test_acc, test_miou = get_metrics(loader,
                                      model,
                                      loader.n_classes,
                                      train=False,
                                      flip_inference=True,
                                      scales=[1, 2, 1.5, 0.5, 0.75],
                                      write_images=True,
                                      preprocess_mode=CONFIG['preprocess'],
                                      time_exect=True)
    print('Test accuracy: ' + str(test_acc.numpy()))
    print('Test miou: ' + str(test_miou.numpy()))
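The final evaluation runs get_metrics with flip_inference=True and several scales, i.e. standard test-time augmentation. A sketch of the averaging it presumably performs (predict_tta is a hypothetical name, not the repository's API):

import tensorflow as tf

def predict_tta(model, x, scales=(0.5, 0.75, 1.0, 1.5, 2.0), flip=True):
    # Average predictions over rescaled and horizontally mirrored inputs.
    h, w = x.shape[1], x.shape[2]
    acc = 0.0
    for s in scales:
        xs = tf.image.resize(x, (int(h * s), int(w * s)))
        pred = model(xs, training=False)
        if flip:
            pred_flip = model(tf.image.flip_left_right(xs), training=False)
            pred = (pred + tf.image.flip_left_right(pred_flip)) / 2.0
        acc = acc + tf.image.resize(pred, (h, w))  # back to input resolution
    return acc / len(scales)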
Example #7
def train(loader,
          optimizer,
          loss_function,
          model,
          size_input,
          epochs=5,
          batch_size=2,
          lr=None,
          init_lr=2e-4,
          evaluation=True,
          name_best_model='weights/best',
          preprocess_mode=None,
          labels_resize_factor=1):
    # Parameters for training
    training_samples = len(loader.image_train_list)
    steps_per_epoch = int(training_samples / batch_size) + 1
    best_miou = 0
    log_freq = max(1, min(50, int(steps_per_epoch / 5)))
    avg_loss = tf.keras.metrics.Mean(name='loss', dtype=tf.float32)
    train_summary_writer = tf.summary.create_file_writer(
        '/tmp/summaries/train')  # tensorboard
    test_summary_writer = tf.summary.create_file_writer(
        '/tmp/summaries/test')  # tensorboard
    print('Please enter in terminal: tensorboard --logdir /tmp/summaries')

    for epoch in range(epochs):  # for each epoch
        lr_decay(lr, init_lr, 1e-9, epoch, epochs - 1)  # compute the new lr
        print('epoch: ' + str(epoch + 1) + '. Learning rate: ' +
              str(lr.numpy()))
        for step in range(steps_per_epoch):  # for every batch
            # get batch
            x, y, mask = loader.get_batch(size=batch_size, train=True)
            x = preprocess(x, mode=preprocess_mode)

            with train_summary_writer.as_default():
                loss = train_step(model, x, y, mask, loss_function, optimizer,
                                  labels_resize_factor, size_input)
                # tensorboard
                avg_loss.update_state(loss)
                if tf.equal(optimizer.iterations % log_freq, 0):
                    tf.summary.scalar('loss',
                                      avg_loss.result(),
                                      step=optimizer.iterations)
                    avg_loss.reset_states()

        if evaluation:
            # get metrics
            with train_summary_writer.as_default():
                train_acc, train_miou = get_metrics(
                    loader,
                    model,
                    loader.n_classes,
                    train=True,
                    preprocess_mode=preprocess_mode,
                    labels_resize_factor=labels_resize_factor,
                    optimizer=optimizer)

            with test_summary_writer.as_default():
                test_acc, test_miou = get_metrics(
                    loader,
                    model,
                    loader.n_classes,
                    train=False,
                    flip_inference=False,
                    scales=[1],
                    preprocess_mode=preprocess_mode,
                    labels_resize_factor=labels_resize_factor,
                    optimizer=optimizer)

            print('Train accuracy: ' + str(train_acc.numpy()))
            print('Train miou: ' + str(train_miou.numpy()))
            print('Test accuracy: ' + str(test_acc.numpy()))
            print('Test miou: ' + str(test_miou.numpy()))
            print('')

            # save the model if it is the best so far
            if test_miou.numpy() > best_miou:
                best_miou = test_miou.numpy()
                model.save_weights(name_best_model)
        else:
            model.save_weights(name_best_model)

        loader.suffle_segmentation()  # shuffle the training set
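labels_resize_factor is threaded through train_step and get_metrics; it presumably downsamples labels and masks so they align with a model whose logits come out at a fraction of the input resolution. A sketch under that assumption (resize_labels is hypothetical; y and mask are assumed to be NHWC tensors):

import tensorflow as tf

def resize_labels(y, mask, factor):
    # Nearest-neighbour downsampling keeps the labels one-hot and the mask binary.
    if factor == 1:
        return y, mask
    new_size = (y.shape[1] // factor, y.shape[2] // factor)
    return (tf.image.resize(y, new_size, method='nearest'),
            tf.image.resize(mask, new_size, method='nearest'))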
Example #8
    train(loader=loader,
          optimizer=optimizer,
          loss_function=loss_function,
          model=model,
          size_input=(height_train, width_train),
          epochs=epochs,
          batch_size=batch_size,
          lr=learning_rate,
          init_lr=lr,
          name_best_model=name_best_model,
          evaluation=True,
          preprocess_mode=preprocess_mode,
          labels_resize_factor=labels_resize_factor)

    get_params(model)

    print('Testing model')
    test_acc, test_miou = get_metrics(
        loader,
        model,
        loader.n_classes,
        train=False,
        flip_inference=True,
        scales=[1],
        write_images=True,
        preprocess_mode=preprocess_mode,
        time_exect=False,
        labels_resize_factor=labels_resize_factor)
    print('Test accuracy: ' + str(test_acc.numpy()))
    print('Test miou: ' + str(test_miou.numpy()))