Example #1
0
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    # NOTE(review): '-has_supplement' is a multi-character single-dash short
    # option (unconventional for argparse), and default=True means the
    # supplement branch below is effectively always taken unless the caller
    # passes a falsy value -- confirm intent.
    parser.add_argument('-has_supplement',
                        "--has_supplement",
                        default=True,
                        help="supplement")

    args = parser.parse_args()

    # load train data set
    with utils.timer('Load image files'):
        if (args.phase == 'train'):
            image_files, labels = data_utils.load_files(
                args.train_input, train_data_source, sample_rate)
        # NOTE(review): if args.phase != 'train', image_files/labels are never
        # bound, so the supplement branch below would raise NameError -- verify
        # that callers only reach this path with phase == 'train'.
        if (args.has_supplement):
            image_files_supplement, labels_supplement = data_utils.load_files(
                '%s_supplement' % args.train_input,
                train_data_source_supplement, sample_rate)
            print('before supplement %s' % len(image_files))
            # np.concatenate is used instead of list.extend -- presumably
            # load_files returns numpy arrays; the old extend calls were
            # left commented out below.
            #image_files.extend(image_files_supplement)
            image_files = np.concatenate([image_files, image_files_supplement],
                                         axis=0)
            print('after supplement %s' % len(image_files))
            #labels.extend(labels_supplement)
            labels = np.concatenate([labels, labels_supplement], axis=0)
        print('image files %s' % len(image_files))

    with tf.Session() as sess:
        input_type = InputType[args.input_type.upper()]
Example #2
0
def extract_nsfw_features(labeled_image_root_dir,
                          image_input_type,
                          image_loader_type,
                          model_dir,
                          has_supplement=False,
                          phase='train',
                          return_image_files=False):
    """Run labeled images through a saved NSFW model and collect features.

    Parameters
    ----------
    labeled_image_root_dir : str
        Directory holding the labeled images to project.
    image_input_type : str
        Name of an ``InputType`` member (case-insensitive).
    image_loader_type : str
        IMAGE_LOADER_YAHOO or IMAGE_LOADER_TENSORFLOW (TENSOR input only).
    model_dir : str
        Directory of the TensorFlow SavedModel to load (tag "serve").
    has_supplement : bool
        When True, also load the hard-coded 'test_0819_*' supplement sets
        that live in sibling directories of ``labeled_image_root_dir``.
    phase : str
        'train' selects ``train_data_source``, anything else
        ``test_data_source`` (both module-level globals).
    return_image_files : bool
        When True, additionally return the image file paths.

    Returns
    -------
    (features, labels) as numpy arrays, plus ``image_files`` when
    ``return_image_files`` is True.
    """
    # load the data set for the requested phase
    with utils.timer('Load image files'):
        source = train_data_source if phase == 'train' else test_data_source
        image_files, labels = data_utils.load_files(labeled_image_root_dir,
                                                    source, sample_rate)
        if has_supplement:
            for part in [
                    'test_0819_part1_1', 'test_0819_part1_2',
                    'test_0819_part1_3', 'test_0819_part3_1'
            ]:
                # supplement dirs are siblings of labeled_image_root_dir
                supplement_dir = '{}/{}'.format(
                    '/'.join(labeled_image_root_dir.split('/')[:-1]), part)
                image_files_supplement, labels_supplement = data_utils.load_files(
                    supplement_dir, 'test_0819_part1', sample_rate)
                print('before supplement %s' % len(image_files))
                image_files = np.concatenate(
                    [image_files, image_files_supplement], axis=0)
                print('after supplement %s' % len(image_files))
                labels = np.concatenate([labels, labels_supplement], axis=0)
        print('image files %s' % len(image_files))

    X_train = []
    y_train = []
    # transform original images into nsfw features
    with tf.Session(graph=tf.Graph()) as sess:

        input_type = InputType[image_input_type.upper()]

        # choose the image-loading strategy for the model's input type
        fn_load_image = None
        if input_type == InputType.TENSOR:
            if image_loader_type == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:

            def fn_load_image(filename):
                # 1-element array holding the base64 payload; the 'with'
                # closes the file handle (the original lambda leaked it)
                with open(filename, "rb") as f:
                    return np.array([base64.urlsafe_b64encode(f.read())])

        # load model and grab the tensors we need from its graph
        with utils.timer('Load model'):
            tf.saved_model.loader.load(sess, ["serve"], model_dir)
            graph = tf.get_default_graph()
            input_image = graph.get_tensor_by_name("input:0")
            projected_features = graph.get_tensor_by_name('nsfw_features:0')

        nsfw_batch_size = 512
        # extract projection features in fixed-size batches
        # (the final batch may be shorter than nsfw_batch_size)
        with utils.timer('Projection with batching'):
            for start in range(0, len(image_files), nsfw_batch_size):
                end = min(start + nsfw_batch_size, len(image_files))
                with utils.timer('batch(%s) prediction' % nsfw_batch_size):
                    batch_images = np.array([
                        fn_load_image(image_files[i]).tolist()
                        for i in range(start, end)
                    ])
                    X_train.extend(
                        sess.run(projected_features,
                                 feed_dict={
                                     input_image: batch_images
                                 }).tolist())
                    y_train.extend(labels[start:end])
                print('projection %s done.' % end)
                # free the batch before the next allocation
                del batch_images
                gc.collect()
    # NOTE: the 'with' block above already closed the session; the redundant
    # second sess.close() of the original has been dropped.

    # sanity check: every label was consumed exactly once
    assert len(y_train) == len(labels)

    if return_image_files:
        return np.array(X_train), np.array(y_train), image_files
    else:
        return np.array(X_train), np.array(y_train)
Example #3
0
def main(argv):
    """Predict NSFW probabilities for a directory of labeled images.

    Loads the image list from ``--input_dir``, builds ``OpenNsfwModel`` from
    ``--model_weights``, runs batched predictions and writes
    ``<image, label, prediction>`` rows to a CSV under
    ``config.TestOutputDir``. *argv* is kept for the conventional entry-point
    signature; argparse reads ``sys.argv`` directly.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '-i',
        "--input_dir",
        default=config.test_data_set[data_source],
        help="Path to the input image. Only jpeg images are supported.")

    parser.add_argument("-m",
                        "--model_weights",
                        default=config.nsfw_model_weight_file,
                        help="Path to trained model weights file")

    parser.add_argument("-l",
                        "--image_loader",
                        default=IMAGE_LOADER_YAHOO,
                        help="image loading mechanism",
                        choices=[IMAGE_LOADER_YAHOO, IMAGE_LOADER_TENSORFLOW])

    parser.add_argument("-t",
                        "--input_type",
                        default=InputType.TENSOR.name.lower(),
                        help="input type",
                        choices=[
                            InputType.TENSOR.name.lower(),
                            InputType.BASE64_JPEG.name.lower()
                        ])

    args = parser.parse_args()

    with utils.timer('Load image files'):
        image_files, labels = data_utils.load_files(args.input_dir,
                                                    data_source, sample_rate)
        print('image files %s' % len(image_files))

    model = OpenNsfwModel()

    predictions = []

    with tf.Session() as sess:

        input_type = InputType[args.input_type.upper()]

        with utils.timer('Load model weight'):
            model.build(weights_path=args.model_weights, input_type=input_type)

        # choose the image-loading strategy for the model's input type
        fn_load_image = None
        if input_type == InputType.TENSOR:
            if args.image_loader == IMAGE_LOADER_TENSORFLOW:
                fn_load_image = create_tensorflow_image_loader(sess)
            else:
                fn_load_image = create_yahoo_image_loader()
        elif input_type == InputType.BASE64_JPEG:
            import base64

            def fn_load_image(filename):
                # 1-element array holding the base64 payload; the 'with'
                # closes the file handle (the original lambda leaked it)
                with open(filename, "rb") as f:
                    return np.array([base64.urlsafe_b64encode(f.read())])

        sess.run(tf.global_variables_initializer())

        with utils.timer('Prediction'):
            # fixed-size batches; the final batch may be shorter
            for start in range(0, len(image_files), config.batch_size):
                end = min(start + config.batch_size, len(image_files))
                with utils.timer('Batch[%s] prediction' % config.batch_size):
                    batch_images = [
                        fn_load_image(image_files[i])
                        for i in range(start, end)
                    ]
                    # column 1 of the model output is the NSFW probability
                    predictions.extend(
                        sess.run(model.predictions,
                                 feed_dict={model.input: batch_images})[:, 1])
                print('Prediction %s done.' % end)

    # save predictions alongside the ground-truth labels
    PredictOutputFile = '%s/%s.csv' % (config.TestOutputDir, data_source)
    with utils.timer('Save predictions'):
        data_utils.save_predictions(image_files, labels, predictions,
                                    PredictOutputFile)

    # visualization on threshold for f1/precision/recall
    # NOTE(review): 'hisotry' looks like a typo for 'history' -- if the
    # project really registers the data source under 'hisotry' this is fine;
    # otherwise this branch is dead. Left unchanged pending confirmation.
    if data_source == 'hisotry':
        output_image_file = '%s/%s_vs_threshold.jpg' % (config.TestOutputDir,
                                                        level)
        with utils.timer('Save visualization for threshold'):
            plot_utils.threshold_vs_toxic(labels, predictions, level,
                                          output_image_file)
Example #4
0
def evaluate(config_task, ids, model, outdir='eval_out', epoch_num=0):
    """Evaluate *model* on the cases in *ids* and report per-class Dice.

    For each case: segment the image, save the predicted label map as NIfTI
    under *outdir*, compare it against the ground truth, append a result row
    to a per-task CSV and push images to tensorboard. Failures of a single
    case are logged and skipped so the whole evaluation keeps going.

    Returns
    -------
    dict
        Mapping from class-label name to its mean Dice over evaluated cases.
    """
    files = list(load_files(ids))

    datDir = os.path.join(config.prepData_dir, config_task.task, "Tr")
    dices_list = []

    # make sure the output dir exists before anything tries to write into it
    # (the original only created it after the first NIfTI save)
    tinies.sureDir(outdir)

    # files = files[:2] # debugging.
    logger.info('Evaluating epoch{} for {}--- {} cases:\n{}'.format(
        epoch_num, config_task.task, len(files),
        str([obj['id'] for obj in files])))
    for obj in tqdm(files, desc='Eval epoch{}'.format(epoch_num)):
        ID = obj['id']
        # image / ground-truth paths; obj carries copies for get_eval_data
        img_path = os.path.join(config.base_dir, config_task.task, "imagesTr",
                                ID)
        gt_path = os.path.join(config.base_dir, config_task.task, "labelsTr",
                               ID)
        obj['im'] = img_path
        obj['gt'] = gt_path

        data = get_eval_data(obj, datDir)

        try:
            # final_label: d, h, w, num_classes
            final_label = segment_one_image(config_task, data, model, ID)
            save_to_nii(final_label,
                        filename=ID + '.nii.gz',
                        refer_file_path=img_path,
                        outdir=outdir,
                        mode="label",
                        prefix='Epoch{}_'.format(epoch_num))

            gt = sitk.GetArrayFromImage(sitk.ReadImage(gt_path))  # d, h, w
            # treat cancer as organ for Task03_Liver and Task07_Pancreas
            if config_task.task in ['Task03_Liver', 'Task07_Pancreas']:
                gt[gt == 2] = 1

            # cal dices
            dices = multiClassDice(gt, final_label, config_task.num_class)
            dices_list.append(dices)

            # append the result row; the 'with' closes the handle
            # (the original opened one file per case and never closed it)
            res_csv = os.path.join(
                outdir, '{}_eval_res.csv'.format(config_task.task))
            with open(res_csv, mode='a+', newline='') as fo:
                csv.writer(fo, delimiter=',').writerow(
                    [epoch_num, tinies.datestr(), ID] + dices)

            ## for tensorboard visualization
            tb_img = sitk.GetArrayFromImage(sitk.ReadImage(img_path))  # d,h,w
            if tb_img.ndim == 4:
                tb_img = tb_img[0, ...]  # drop the leading 4th axis
            train.tb_images([tb_img, gt, final_label], [False, True, True],
                            ['image', 'GT', 'PS'],
                            epoch_num * config.step_per_epoch,
                            tag='Eval_{}_epoch_{}_dices_{}'.format(
                                ID, epoch_num, str(dices)))
        except Exception as e:
            # best-effort: a broken case must not abort the whole evaluation
            logger.info('{}'.format(str(e)))

    labels = config_task.labels
    # NOTE(review): if every case failed, dices_list is empty and mean(axis=0)
    # below degenerates -- confirm whether that should raise instead.
    dices_all = np.asarray(dices_list)
    dices_mean = dices_all.mean(axis=0)
    logger.info('Eval mean dices:')
    dices_res = {}
    for i in range(config_task.num_class):
        tag = labels[str(i)]
        dices_res[tag] = dices_mean[i]
        logger.info('    {}, {}'.format(tag, dices_mean[i]))

    return dices_res