コード例 #1
0
ファイル: segnet_triple_encode.py プロジェクト: thesby/dsb3
    "shear": [0, 0, 0],  # degrees
    "translation": [16, 16, 16],  # mms (from -128 to 128)
    "reflection": [0, 0, 0] #Bernoulli p
}

IMAGE_SIZE = 64  # edge length of the cubic network input, in voxels

# Preprocessors for the data. They are run sequentially on the datadict of
# the dataloader, in the order of this list.
# (These were bare string statements before — no-op expressions, not comments.)
preprocessors = [
    AugmentOnlyPositive(
        tags=["luna:3d", "luna:segmentation"],
        output_shape=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_SIZE),  # in pixels
        norm_patch_size=(IMAGE_SIZE, IMAGE_SIZE, IMAGE_SIZE),  # in mms
        augmentation_params=AUGMENTATION_PARAMETERS,
    ),
    # Zero-mean unit-variance normalisation with dataset-wide statistics.
    ZMUV("luna:3d", bias=-648.59027, std=679.21021),
]

#####################
#     training      #
#####################

# Train dataloader: training runs until this loader stops producing data.
# Epochs, datasets and multiprocessing are configured here.
# (These notes were bare string statements before — no-op expressions, not comments.)
training_data = LunaDataLoader(
    only_positive=True,  # restrict to scans that contain at least one nodule
    sets=TRAINING,
    epochs=30,
    preprocessors=preprocessors,
    multiprocess=True,
    crash_on_exception=True,
)
コード例 #2
0
ファイル: final_stage_example.py プロジェクト: thesby/dsb3
    mode="constant"
    )

# Training preprocessing pipeline: random ROI augmentation followed by
# zero-mean unit-variance normalisation (dataset-wide statistics).
preprocessors = [
    # DicomToHU(tags=[tag+"3d"]),
    augment_roi(
        augmentation_params={
            "scale": [1, 1, 1],        # per-axis scale factor
            "uniform scale": 1,        # isotropic scale factor
            "rotation": [0, 0, 180],   # degrees
            "shear": [0, 0, 0],        # degrees
            "translation": [5, 5, 5],  # mm
            "reflection": [0, 0, 0],   # Bernoulli p
        },
    ),
    # DefaultNormalizer(tags=[tag+"3d"])
    ZMUV(tag + "3d", bias=-648.59027, std=679.21021),
]

# Validation preprocessing pipeline: augment_roi() without augmentation
# parameters (deterministic), then the same ZMUV normalisation as training.
preprocessors_valid = [
    # DicomToHU(tags=[tag+"3d"]),
    augment_roi(),
    # DefaultNormalizer(tags=[tag+"3d"])
    ZMUV(tag + "3d", bias=-648.59027, std=679.21021),
]

#####################
#     training      #
#####################

# Train dataloader: training runs until this loader stops producing data.
# Epochs, datasets and multiprocessing are configured on it.
# (These notes were bare string statements before — no-op expressions, not comments.)
コード例 #3
0
ファイル: roi_stage1.py プロジェクト: thesby/dsb3
plot = False
# extract nodules in a background thread
multiprocess = True

# tag prefix for the new data
tag = "stage1:"
# add the pixelspacing tag here if it is not loaded yet, so patches can be made
extra_tags = []

# for building the segmentation model the input tag must be replaced
replace_input_tags = {"luna:3d": tag + "3d"}  # {old: new}

# preprocessing before patch extraction
preprocessors = [DicomToHU(tags=[tag + "3d"])]
# preprocessing on the extracted patches
postpreprocessors = [ZMUV(tag + "3d", bias=-648.59027, std=679.21021)]

data_loader = Stage1DataLoader(
    sets=[TRAINING, VALIDATION],
    preprocessors=preprocessors,
    epochs=1,
    multiprocess=False,
    crash_on_exception=True,
)

batch_size = 1  # NOTE(review): downstream code reportedly only works with 1
# function to call to extract nodules from the fully reconstructed segmentation
def extract_nodules(segmentation):
    """segmentation is a 3D array"""
    rois = blob_dog(segmentation, min_sigma=1, max_sigma=15, threshold=0.1)
    if rois.shape[0] > 0:
コード例 #4
0
ファイル: slices_bn.py プロジェクト: thesby/dsb3
    Augment3D(
        tags=["bcolzall:3d"],
        output_shape=nn_input_shape,
        norm_patch_shape=norm_patch_shape,
        augmentation_params={
            "scale": [1, 1, 1],  # factor
            "uniform scale": 1,  # factor
            "rotation": [0, 0, 180],  # degrees
            "shear": [0, 0, 0],  # degrees
            "translation": [50, 50, 50],  # mm
            "reflection": [.5, .5, .5]
        },  # Bernoulli p
        interp_order=1,
        mode="constant"),
    # DefaultNormalizer(tags=["bcolzall:3d"])
    ZMUV("bcolzall:3d", bias=-648.59027, std=679.21021)
]

# Validation preprocessing pipeline: Augment3D without augmentation_params
# (deterministic resampling to the network input shape), then the same ZMUV
# normalisation as used during training.
preprocessors_valid = [
    # DicomToHU(tags=["bcolzall:3d"]),
    Augment3D(
        tags=["bcolzall:3d"],
        output_shape=nn_input_shape,
        norm_patch_shape=norm_patch_shape,
        interp_order=1,
        mode="constant",
    ),
    ZMUV("bcolzall:3d", bias=-648.59027, std=679.21021),
]

#####################
#     training      #
#####################