Example #1
import os

from cat_db import CatDB

# Set important paths and defaults
EPOCHS = 40
ENCODERS = [
    'se_resnext50_32x4d', 'densenet161', 'resnet50', 'efficientnet-b5',
    'timm-efficientnet-b5'
]
MODELS = ['PSPNet']  #'FPN', 'Unet', 'Linknet', 'DeepLabV3Plus']
COLORS_FILE_PATH = 'colors.csv'
LEGEND_FILE_PATH = 'legend.png'
ENCODER_WEIGHTS = 'imagenet'
ACTIVATION = 'sigmoid'
DEVICE = 'cpu'
SCRATCH_BASEPATH = '/sdk/scratch/segmentation/examples'
os.makedirs(SCRATCH_BASEPATH, mode=0o777, exist_ok=True)

# Initialize CatDB
catdb = CatDB()

# Get Training Image IDs
label_img_ids = catdb.multisearch('location', 'indoor', 'type', 'labels',
                                  'label_names', '!-', 'arrangement',
                                  '!arrangement1', 'arrangement',
                                  '!arrangement2')
raw_img_ids = []
for l_id in label_img_ids:
    tmp_item = catdb[l_id]
    raw_img_id = catdb.multisearch('arrangement', tmp_item['arrangement'],
                                   'scene', tmp_item['scene'], 'type', 'raw',
                                   'side', 'left', 'hazard', 'default',
                                   'modality', 'color')
    raw_img_ids.append(raw_img_id[0])
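
# A minimal follow-up sketch: the search above should return exactly one raw
# image per label image, so the two ID lists can be paired 1:1 for training.
assert len(raw_img_ids) == len(label_img_ids)
train_id_pairs = list(zip(raw_img_ids, label_img_ids))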
Example #2
import os

import segmentation_models_pytorch as smp

from cat_db import CatDB
from cat_datasets import SegmentationDataset
from cat_utils import read_label_names, get_unique_classes, img_float_to_uint8

# Set important paths and defaults
EPOCHS = 40
ENCODER = 'se_resnext50_32x4d'
ENCODER_WEIGHTS = 'imagenet'
ACTIVATION = 'sigmoid'
DEVICE = 'cuda'
SCRATCH_BASEPATH = '/sdk/scratch/segmentation/examples'
os.makedirs(SCRATCH_BASEPATH, mode=0o777, exist_ok=True)

# Search for item ids from the CatDB
#  For this example, only use "books" scene with the (raw, left, color, default)
#  images that correspond to the label images
catdb = CatDB()
raw_img_ids = catdb.multisearch('scene', 'books', 'type', 'raw', 'side',
                                'left', 'hazard', 'default', 'modality',
                                'color')
label_img_ids = catdb.multisearch('scene', 'books', 'type', 'labels')
assert len(raw_img_ids) == len(label_img_ids)

# Determine unique classes in the data
label_items = catdb[label_img_ids]
label_names_list = read_label_names(label_items)
unique_classes, unique_subclasses = get_unique_classes(label_names_list)
NUM_CLASSES = len(unique_classes) + 1  # Add in background
print("Unique Classes in the dataset: {}".format(unique_classes))

# Build a pretrained model for finetuning
model = smp.FPN(
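    # A hedged completion sketch: the snippet is truncated at this call, so the
    # keyword arguments below are assumptions that follow the standard
    # segmentation_models_pytorch FPN signature and reuse the constants above.
    encoder_name=ENCODER,
    encoder_weights=ENCODER_WEIGHTS,
    classes=NUM_CLASSES,
    activation=ACTIVATION,
)
model = model.to(DEVICE)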
Example #3
import os
import sys

sys.path.append('/sdk/catlib')
from cat_db import CatDB
from cat_utils import pprint, mdprint

# Import Local Packages
from wildcat_model import WILDCAT
from cat_sgm import (read_images, WILDCAT_costvolume, aggregate_costs,
                     select_disparity, get_recall, save_costvolume_image,
                     compute_costs, CATSGMParameters, SGMPaths)

# Set scratch path
SCRATCH_BASEPATH = '/sdk/scratch/cat_stereo/examples'
os.makedirs(SCRATCH_BASEPATH, mode=0o777, exist_ok=True)

# Initialize CatDB
catdb = CatDB()
metadb = catdb.get_metadb()

# Set defaults
PATCH_SIZE = (64, 64)
WEIGHTS_PATH = 'WILDCAT_RES_WEIGHTS.h5'
LEFT, RIGHT, DISP, MASK = 0, 1, 2, 3
TEST_SCENES = ['books']  #metadb['indoor_scene'] #['misc', 'materials']
TRAIN_SCENES = [s for s in metadb['indoor_scene'] if s not in TEST_SCENES]
ARRANGEMENTS = ['arrangement1', 'arrangement2']  #metadb['arrangement']
MODALITY = 'cross'


# Get Image ID Sets
def populate_id_sets(catdb, scenes, arrangements, modality):
    image_id_sets = []
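    # A hedged continuation sketch: the snippet is truncated inside this
    # function, so the loop below is an assumption that mirrors the multisearch
    # key/value style used in the other examples. The disparity and mask
    # lookups (DISP, MASK) are omitted because their search keys do not appear
    # in the snippet.
    for scene in scenes:
        for arrangement in arrangements:
            left_ids = catdb.multisearch('scene', scene, 'arrangement',
                                         arrangement, 'type', 'raw', 'side',
                                         'left', 'modality', modality)
            right_ids = catdb.multisearch('scene', scene, 'arrangement',
                                          arrangement, 'type', 'raw', 'side',
                                          'right', 'modality', modality)
            if left_ids and right_ids:
                image_id_sets.append((left_ids[0], right_ids[0]))
    return image_id_sets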
Example #4
# https://matplotlib.org/3.1.1/gallery/event_handling/ginput_manual_clabel_sgskip.html#sphx-glr-gallery-event-handling-ginput-manual-clabel-sgskip-py

import time
import numpy as np
import matplotlib.pyplot as plt

# Package imports
from cat_db import CatDB


class ImageViewer():
    def __init__(self, catdb):
        self.catdb = catdb

    def view_by_key(self, key):
        # Look up the item for this key in the CatDB
        image = self.catdb[key]
        plt.title("Key: {}".format(key), fontsize=16)
        # Placeholder plot; the fetched item itself is not rendered here
        plt.plot([1, 2], [3, 4])
        plt.show()


if __name__ == "__main__":
    catdb = CatDB()
    viewer = ImageViewer(catdb)
    viewer.view_by_key(0)
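
# A hedged variant sketch: view_by_key above draws a placeholder line rather
# than the item it fetched. If the raw pixel data were loaded into a NumPy
# array (how to load it from a CatDB item is not shown in this snippet), it
# could be displayed with imshow instead:
def view_array(arr, key):
    plt.title("Key: {}".format(key), fontsize=16)
    plt.imshow(arr)    # render the image array
    plt.axis('off')
    plt.show()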
Example #5
import os

from cat_db import CatDB

MODEL_NAME = 'UNet'  # ['UNet', 'EGUNet', 'RTFNet']
DEVICE = 'cuda:2'
BATCH_SIZE = 4

EPOCHS = 100
COLORS_FILE_PATH = 'legend/colors.csv'
LEGEND_FILE_PATH = 'legend/legend.png'
MFDATASET_PATH = '/external/RTFNet/ir_seg_dataset/'
MFDATASET_TRAIN_SPLIT = 'train'
MFDATASET_VAL_SPLIT = 'val'
MFDATASET_TEST_SPLIT = 'val'
SCRATCH_BASEPATH = '/sdk/scratch/cat_segmentation/examples'
os.makedirs(SCRATCH_BASEPATH, mode=0o777, exist_ok=True)

# Initialize CatDB
catdb = CatDB()

# Get Training Image IDs
label_img_ids = catdb.multisearch('location', 'indoor', 'type', 'labels',
                                  'label_names', '!-')
cat_aligned_ids = []
for l_id in label_img_ids:
    tmp_item = catdb[l_id]
    cat_aligned_id = catdb.multisearch('arrangement', tmp_item['arrangement'],
                                       'scene', tmp_item['scene'], 'type',
                                       'left_ct_aligned')
    cat_aligned_ids.append(cat_aligned_id[0])

# Get Validation Image IDs
val_label_img_ids = catdb.multisearch(
    'location', 'indoor', 'type', 'labels', 'label_names', '!-', 'arrangement',