Example #1
def main():
    source = os.path.join(utils.get_eyez_dir(), 'Recognition', 'Databases',
                          'Rot ScleraNet', 'stage2')
    for (by, bins, bin_labels) in (('age', (25, 40), ('-25', '26-40', '41-')),
                                   ('gender', None, None)):
        target = os.path.join(source, '..', f'stage2_{by}')
        group_by(source, target, by, bins, bin_labels)
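A minimal sketch of the assumed age-binning semantics (hypothetical; the actual logic lives in the project's group_by, which is not shown here): the bin edges (25, 40) presumably split ages into the three labelled groups as follows.

# Hypothetical illustration only; not the project's group_by implementation.
from bisect import bisect_left

bins, bin_labels = (25, 40), ('-25', '26-40', '41-')
for age in (19, 33, 57):
    print(age, '->', bin_labels[bisect_left(bins, age)])  # 19 -> '-25', 33 -> '26-40', 57 -> '41-'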
Example #2
def test_sift():
    model = DirectDistanceModel(SIFT())
    split = BaseSplit(
        Dataset(
            os.path.join(utils.get_eyez_dir(), 'Recognition', 'Databases',
                         'Rot ScleraNet', 'temp')))
    model.evaluate(split.gallery, split.probe)
Example #3
def scleranet(layer='final_features', image_size=None, *args, **kw):
    model = load_model(
        os.path.join(get_eyez_dir(), 'Recognition', 'Models', 'Rot ScleraNet',
                     'id_dir_prediction.75-0.667.hdf5'))
    if image_size and image_size != (400, 400):
        # Rebuild the network for a different input resolution: drop the original
        # input layer, cut the graph at the requested layer and re-apply it to a
        # new Input of the desired size.
        model.layers.pop(0)
        model = Model(model.input, model.get_layer(layer).output)
        input_ = Input(shape=(*image_size, 3))
        model = Model(input_, model(input_))
        return base_nn_config(model, *args, input_size=image_size, **kw)
    return base_nn_config(Model(model.input,
                                model.get_layer(layer).output), *args, **kw)
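A usage sketch (hedged: it assumes the HDF5 weights file above is available and that base_nn_config wraps the Keras model into the model configuration consumed by the evaluation code, as in Example #8):

# Hypothetical calls; names taken from the snippet above.
cfg_native = scleranet()                      # keeps the native 400x400 input
cfg_small = scleranet(image_size=(256, 256))  # rewires the network to a 256x256 input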
Example #4
    plt.imsave(os.path.join(tgt_root, file), p >= threshold, cmap='gray')
    shutil.copyfile(gt_f, os.path.join(tgt_root, gt_file))


def f1_score(precision, recall):
    if precision == recall == 0:
        return 0
    return 2 * precision * recall / (precision + recall)


def find_max_f1(precision_v, recall_v, threshold_v):
    if len(threshold_v) < len(precision_v):
        threshold_v = np.append(threshold_v, [1.])
    max_f1 = (None, float('-inf'))
    for (p, r, t) in zip(precision_v, recall_v, threshold_v):
        f1 = f1_score(p, r)
        if f1 > max_f1[1]:
            max_f1 = (t, f1)
    return max_f1


def rgb2gray(rgba):
    return np.dot(rgba[..., :3], [0.2989, 0.587, 0.114])
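# A small usage sketch of the helpers above on made-up precision/recall values.
# The threshold vector is one element shorter than the others (as returned by
# e.g. sklearn's precision_recall_curve), which is why find_max_f1 pads it with 1.
import numpy as np  # assumed to already be imported at the top of the original module

example_precision = np.array([0.50, 0.70, 0.90, 1.00])
example_recall = np.array([1.00, 0.80, 0.40, 0.00])
example_thresholds = np.array([0.25, 0.50, 0.75])
best_threshold, best_f1 = find_max_f1(example_precision, example_recall, example_thresholds)
print(best_threshold, round(best_f1, 3))  # -> 0.5 0.747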


if len(sys.argv) > 1:
    binarise(*sys.argv[1:])
else:
    binarise(os.path.join(get_eyez_dir(), 'Segmentation', 'Results',
                          'Vessels'))
Example #5
    if all_types or 'json' in save_type:
        import json
        try:
            with open(save_name.format(basename, 'channels', '.json'),
                      'w' if overwrite else 'x') as save_file:
                json.dump(mask, save_file, indent=4, sort_keys=True)
        except FileExistsError:
            pass
    if all_types or 'csv' in save_type:
        import csv
        try:
            with open(save_name.format(basename, 'channels', '.csv'),
                      'w' if overwrite else 'x') as save_file:
                w = csv.DictWriter(save_file, mask.keys(), delimiter=';')
                w.writeheader()
                w.writerow(mask)
        except FileExistsError:
            pass

    return counter


if __name__ == '__main__':
    source = sys.argv[1] if len(sys.argv) > 1 else os.path.join(
        get_eyez_dir(), 'SBVPI', 'SBVPI_vessels')
    target = sys.argv[2] if len(sys.argv) > 2 else os.path.join(
        source, '..', 'SBVP_with_masks')
    annotations_to_masks(source,
                         target,
                         save_type='img',
                         plot=False,
                         overwrite=False,
                         logging_file='')
Example #6
import os
import re
import sys
from ast import literal_eval

from PIL import Image

sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from utils import get_eyez_dir

PRIMARY_CHANNELS = ('periocular', 'sclera')
SECONDARY_CHANNELS = ('canthus', 'eyelashes', 'iris', 'pupil', 'vessels')
IMG_EXTS = ('.png', '.jpg', '.jpeg', '.bmp', '.gif', '.tiff')
NAMING = r'\d+[LR]_[lrsu]_\d+'
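# For illustration only (assumed reading of the pattern: <id><eye>_<gaze direction>_<index>):
# '1L_l_1.png' or '23R_s_4.jpg' would match NAMING, while 'IMG_0001.jpg' would not.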

# Defaults
SIZE = (3000, 1700)
SRC = os.path.join(get_eyez_dir(), 'SBVPI', 'SBVPI_with_masks')


def resize(size=SIZE,
           source=SRC,
           target=None,
           check_for_channels=True,
           convert_original=False):
    if isinstance(size, str):
        size = literal_eval(size)
    if not os.path.isdir(source):
        raise ValueError(f"{source} is not a directory.")
    if not target:
        target = os.path.join(source, '..', 'Resized',
                              'x'.join(str(i) for i in size))
    if isinstance(check_for_channels, str):
Example #7
		delayed(_process_image)(fname, source, target, flipped_target, max_cls)
		for (source, target, flipped_target) in zip(source_dirs, target_dirs_original, target_dirs_flipped)
		for fname in os.listdir(source)
	)


def _process_image(fname, source, target, flipped_target, max_cls):
	f = os.path.join(source, fname)
	basename, ext = os.path.splitext(fname)
	if not os.path.isfile(f) or ext.lower() not in IMG_EXTS or not re.match(r'\d+[LR]_[lrsu]_\d+', basename):
		return
		
	print(f"Processing file {fname}")
	
	target_f = os.path.join(target, fname)
	shutil.copyfile(f, target_f)

	img = Image.open(f).transpose(Image.FLIP_LEFT_RIGHT)
	new_basename = basename.split('_')
	new_basename[0] = str(int(new_basename[0][:-1]) + max_cls) + multi_replace(new_basename[0][-1], {'R': 'L', 'L': 'R'})
	new_basename[1] = multi_replace(new_basename[1], {'l': 'r', 'r': 'l'})
	new_basename = '_'.join(new_basename)
	img.save(os.path.join(flipped_target, f'{new_basename}{ext}'))


if __name__ == '__main__':
	source = sys.argv[1] if len(sys.argv) > 1 else os.path.join(get_eyez_dir(), 'SBVPI', 'SBVPI_with_masks')
	target = sys.argv[2] if len(sys.argv) > 2 else os.path.join(source, '..', 'SBVPI_mirrored')
	flip(source, target)

Example #8
def main():
    # Define file naming rules
    naming = NamingParser(r'ie_d_n',
                          eyes=r'LR',
                          directions=r'lrsu',
                          strict=True)
    both_eyes_same_class = False
    mirrored_offset = 0

    train = None
    if DATA['train']:
        train = Dataset(os.path.join(DATA_DIR, DATA['train']),
                        naming=naming,
                        both_eyes_same_class=both_eyes_same_class,
                        mirrored_offset=mirrored_offset)
        if GROUP_BY:
            train = train.group_by(GROUP_BY, BINS)
    test = Dataset(os.path.join(DATA_DIR, DATA['test']),
                   naming=naming,
                   both_eyes_same_class=both_eyes_same_class,
                   mirrored_offset=mirrored_offset)
    if GROUP_BY:
        test = test.group_by(GROUP_BY, BINS)

    if IMG_SIZE:
        models = (scleranet(image_size=IMG_SIZE),
                  *(descriptor(name, image_size=IMG_SIZE)
                    for name in ('sift', 'surf', 'orb')),
                  descriptor('sift', True, image_size=IMG_SIZE))
    else:
        models = (scleranet(), *(descriptor(name)
                                 for name in ('sift', 'surf', 'orb')),
                  descriptor('sift', True))
    labels = ("CNN", "SIFT", "SURF", "ORB", "dSIFT")

    # The following is for plotting grouped results; comment it out otherwise
    #models = (models[I],)
    #labels = (labels[I],)

    res_path = [DATA['test']]
    if GROUP_BY:
        res_path.append(f'{GROUP_BY}{"_intergroup" if INTERGROUP else ""}')
    try:
        # BASE_DIRS is either an int (a number of gaze directions) or an iterable of
        # direction constants (L, R, C, U); the TypeError branch below handles the latter.
        if BASE_DIRS < 4:
            res_path.append(
                f'{BASE_DIRS} direction{"s" if BASE_DIRS > 1 else ""} in base')
    except TypeError:
        dirs = {L: 'left', R: 'right', C: 'center', U: 'up'}
        res_path.append(
            f'{", ".join(dirs[d] for d in sorted(BASE_DIRS))} in base')
    res_dir = os.path.join(get_eyez_dir(), 'Recognition', 'Results', *res_path)
    os.makedirs(res_dir, exist_ok=True)

    group_suffix = '_{group}' if GROUP_BY else ''
    fold_suffix = '_fold{fold}' if K > 1 else ''
    # This is also for grouped plotting, but it doesn't need to be commented out
    label_suffix = f'-{labels[0]}' if GROUP_BY else ''

    font_size = 16 if SIZE == 'large' else 32
    legend_size = 16 if SIZE == 'large' else 24
    size_suffix = '-large' if SIZE == 'large' else ''

    painter = None
    if PLOT:
        painter = Painter(
            lim=(0, 1.01),
            xticks=np.linspace(0.2, 1, 5),
            yticks=np.linspace(0, 1, 6),
            colors=['r', 'b', 'g', 'purple', 'black'],
            k=(len(test) if GROUP_BY else 1) * K * len(models),
            # For grouped
            #labels=(
            #	[f"{key} (k = {k})" for key in test.keys() for k in range(K)] if GROUP_BY and K > 1
            #	else [f"{key}" for key in test.keys()] if GROUP_BY
            #	else [f"k = {k}" for k in range(K)]
            #),
            # Otherwise
            labels=labels,
            font='Times New Roman',
            font_size=font_size,
            legend_size=legend_size,
            pause_on_end=False)
        painter.init()
        painter.add_figure('EER', xlabel='Threshold', ylabel='FAR/FRR')
        painter.add_figure('ROC Curve',
                           save=os.path.join(
                               res_dir,
                               f'Sclera-ROC{label_suffix}{size_suffix}.eps'),
                           xlabel='FAR',
                           ylabel='VER',
                           legend_loc='lower right')
        painter.add_figure(
            'Semilog ROC Curve',
            save=os.path.join(
                res_dir, f'Sclera-ROC-log{label_suffix}{size_suffix}.eps'),
            xlabel='FAR',
            ylabel='VER',
            legend_loc='best',
            xscale='log',
            xlim=(1e-3, 1.01),
            xticks=(1e-3, 1e-2, 1e-1, 1),
            x_tick_formatter=exp_format)

    try:
        res_str = []
        for model, label in zip(models, labels):
            evaluation = CV(model)(
                train,
                test,
                K,
                base_split_n=BASE_DIRS,
                plot=painter,
                closest_only=True,
                intergroup_evaluation=INTERGROUP,
                save=os.path.join(res_dir, 'Distance Matrices',
                                  f'{label}{group_suffix}{fold_suffix}.pkl'),
                use_precomputed=LOAD)
            if GROUP_BY:
                res_str.append(f"{label}:\n\n" +
                               "\n\n".join(f"{k}:\n{str(v)}"
                                           for k, v in evaluation.items()))
            else:
                res_str.append(f"{label}:\n\n{str(evaluation)}")
            print(f"\n{'-' * 40}\n")
            print(res_str[-1])
            print(f"\n{'-' * 40}\n")
        if SAVE:
            with open(os.path.join(res_dir, 'Evaluation.txt'),
                      'w') as res_file:
                res_file.write("\n\n\n\n".join(res_str))
                res_file.write("\n")

    finally:
        if PLOT:
            painter.finalize()
Example #9
# Should the dataset be grouped by an attribute (such as age)? If not, set GROUP_BY to None.
GROUP_BY = None
#GROUP_BY = 'age'
#GROUP_BY = 'gender'
# Bins to group into. Ignored if GROUP_BY is None. For more info, see dataset.Dataset.group_by.
BINS = (25, 40)
#BINS = None
# Are we using intergroup evaluation? Ignored if GROUP_BY is None. See cross_validate.CV.cross_validate_grouped.
INTERGROUP = False
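# For illustration (cf. Example #1, where the same database is grouped by age):
# BINS = (25, 40) presumably yields three age groups matching the '-25', '26-40' and '41-' labels used there.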

# Ignore this
I = 0

# Training and testing datasets. If no training is to be done, set train to None.
DATA_DIR = os.path.join(get_eyez_dir(), 'Recognition', 'Databases',
                        'Rot ScleraNet')
DATA = {'train': None, 'test': 'stage2'}
#DATA_DIR = os.path.join(get_eyez_dir(), 'Recognition', 'Databases')
#DATA = {'train': None, 'test': 'SBVPI Scleras'}


def main():
    # Define file naming rules
    naming = NamingParser(r'ie_d_n',
                          eyes=r'LR',
                          directions=r'lrsu',
                          strict=True)
    both_eyes_same_class = False
    mirrored_offset = 0
Example #10
				for i, image in enumerate(data)
			)
	
def _process_image(image, src, tgt):
	print(image)
	source_dir, source_gt_dir = src
	target_dir, target_gt_dir = tgt
	
	shutil.copy(os.path.join(source_dir, image), target_dir)
	if source_gt_dir and target_gt_dir:
		for ext in ('.png', '.jpg', '.jpeg', '.bmp', '.gif', '.eps'):
			try:
				shutil.copy(os.path.join(source_gt_dir, f'{os.path.splitext(image)[0]}{ext}'), target_gt_dir)
				break
			except FileNotFoundError:
				pass


if len(sys.argv) > 1:
	split(*sys.argv[1:])
else:
	args = [None] * 6
	args[0] = os.path.join(get_eyez_dir(), 'Segmentation', 'Databases', 'Sclera', 'SSBC2019 + SBVPI', 'Images')
	args[1] = os.path.join(args[0], '..', 'train', 'Images')
	args[2] = os.path.join(args[0], '..', 'val', 'Images')
	args[3] = os.path.join(args[0], '..', 'Masks')
	args[4] = os.path.join(args[0], '..','train', 'Masks')
	args[5] = os.path.join(args[0], '..','val', 'Masks')
	split(*args)

Example #11
from tqdm import tqdm, trange

sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from utils import get_eyez_dir

# Bootstrapping parameters
K = 1
ratio = 1

# Which config to use
cfg = 'sclera'

# Should precision/recall be loaded (True) or computed anew (False)?
load_pr = True

seg_results = os.path.join(get_eyez_dir(), 'Segmentation', 'Results')
if cfg == 'vessels':
    models = ('Coye', 'B-COSFIRE')  #, 'Miura_MC', 'Miura_RLT', 'Miura_MC_norm', 'Miura_RLT_norm', 'agt', 'segnet')
    pred_dir = os.path.join(seg_results, 'Vessels')
    fig_file = os.path.join(pred_dir, 'Vessels_ROC.eps')
    zoom_file = os.path.join(pred_dir, 'Vessels_ROC_Zoomed.eps')
    f1_file = os.path.join(pred_dir, 'Vessels_Scores.txt')
    pr_file = os.path.join(pred_dir, '{}_precision_recall')
    #colors = iter([hsv_to_rgb((h, 1, 1)) for h in np.linspace(0, 1, len(models), endpoint=False)])
    #colors = iter(matplotlib.cm.get_cmap('plasma')(np.linspace(0, 1, len(models), endpoint=False)))
    colors = iter(['red', 'blue', 'green', 'black'])
    legend_loc = 'upper right'
elif cfg == 'sclera':
    models = ('RefineNet-50', 'RefineNet-101', 'UNet', 'SegNet')