def build_model(input_layer=None):

    #################
    # Regular model #
    #################

    l_4ch = nn.layers.InputLayer(data_sizes["sliced:data:chanzoom:4ch"])
    l_2ch = nn.layers.InputLayer(data_sizes["sliced:data:chanzoom:2ch"])

    # Add an axis to concatenate over later
    l_4chr = nn.layers.ReshapeLayer(l_4ch, (
        -1,
        1,
    ) + l_4ch.output_shape[1:])
    l_2chr = nn.layers.ReshapeLayer(l_2ch, (
        -1,
        1,
    ) + l_2ch.output_shape[1:])

    # Cut the images in half, flip the left ones
    l_4ch_left = nn.layers.SliceLayer(l_4ch,
                                      indices=slice(image_size // 2 - 1, None,
                                                    -1),
                                      axis=-1)
    l_4ch_right = nn.layers.SliceLayer(l_4ch,
                                       indices=slice(image_size // 2, None, 1),
                                       axis=-1)
    l_2ch_left = nn.layers.SliceLayer(l_2ch,
                                      indices=slice(image_size // 2 - 1, None,
                                                    -1),
                                      axis=-1)
    l_2ch_right = nn.layers.SliceLayer(l_2ch,
                                       indices=slice(image_size // 2, None, 1),
                                       axis=-1)

    # Concatenate over second axis
    l_24lr = nn.layers.ConcatLayer(
        [l_4ch_left, l_4ch_right, l_2ch_left, l_2ch_right], axis=1)

    # Move second axis to batch, process them all in the same way
    l_halves = nn.layers.ReshapeLayer(
        l_24lr, (-1, nr_frames, image_size, image_size // 2))
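
    # Note (hedged, assuming the usual (batch, nr_frames, image_size, image_size)
    # input layout): l_4chr / l_2chr add the extra axis but are not used below;
    # the slices are taken from l_4ch / l_2ch directly, so the axis=1 concat
    # stacks the four half-image stacks along the frame axis and the reshape
    # into l_halves folds that factor of 4 into the batch dimension:
    # l_halves is (batch * 4, nr_frames, image_size, image_size // 2).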

    # First, do some convolutions in all directions
    l1a = nn.layers.dnn.Conv2DDNNLayer(l_halves,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=32,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=32,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(1, 2), stride=(1, 2))

    # Then, convolve along the last (width) axis only
    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(1, 2), stride=(1, 2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(1, 2), stride=(1, 2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(1, 2), stride=(1, 2))

    # Now, process each row separately: swap the channel and height axes, then fold the height axis into the batch
    l4shuffle = nn.layers.DimshuffleLayer(l4, pattern=(0, 2, 1, 3))
    l4rows = nn.layers.ReshapeLayer(
        l4shuffle,
        (-1, l4shuffle.output_shape[-2], l4shuffle.output_shape[-1]))
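
    # Rough shape bookkeeping (assuming image_size is divisible by 32): l4 is
    # (batch * 4, 256, image_size, image_size // 32); after the dimshuffle and
    # reshape, l4rows is (batch * 4 * image_size, 256, image_size // 32), so
    # every row of every half-image gets its own mu/sigma prediction below.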

    # Systole
    ldsys1 = nn.layers.DenseLayer(l4rows,
                                  num_units=256,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
    ldsys2 = nn.layers.DenseLayer(ldsys1drop,
                                  num_units=256,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
    ldsys3mu = nn.layers.DenseLayer(ldsys2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(16.0),
                                    nonlinearity=None)
    ldsys3sigma = nn.layers.DenseLayer(ldsys2drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(4.0),
                                       nonlinearity=lb_softplus(.01))
    ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)

    l_24lr_sys_musigma = nn.layers.ReshapeLayer(ldsys3musigma,
                                                (-1, 4, image_size, 2))
    l_4ch_left_sys_musigma = nn.layers.SliceLayer(l_24lr_sys_musigma,
                                                  indices=0,
                                                  axis=1)
    l_4ch_right_sys_musigma = nn.layers.SliceLayer(l_24lr_sys_musigma,
                                                   indices=1,
                                                   axis=1)
    l_2ch_left_sys_musigma = nn.layers.SliceLayer(l_24lr_sys_musigma,
                                                  indices=2,
                                                  axis=1)
    l_2ch_right_sys_musigma = nn.layers.SliceLayer(l_24lr_sys_musigma,
                                                   indices=3,
                                                   axis=1)

    l_4ch_sys_musigma = layers.SumGaussLayer(
        [l_4ch_left_sys_musigma, l_4ch_right_sys_musigma])
    l_2ch_sys_musigma = layers.SumGaussLayer(
        [l_2ch_left_sys_musigma, l_2ch_right_sys_musigma])

    l_sys_musigma = layers.IraLayerNoTime(l_4ch_sys_musigma, l_2ch_sys_musigma)

    l_systole = layers.MuSigmaErfLayer(l_sys_musigma)

    # Diastole
    lddia1 = nn.layers.DenseLayer(l4rows,
                                  num_units=256,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    lddia1drop = nn.layers.dropout(lddia1, p=0.5)
    lddia2 = nn.layers.DenseLayer(lddia1drop,
                                  num_units=256,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    lddia2drop = nn.layers.dropout(lddia2, p=0.5)
    lddia3mu = nn.layers.DenseLayer(lddia2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(16.0),
                                    nonlinearity=None)
    lddia3sigma = nn.layers.DenseLayer(lddia2drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(4.0),
                                       nonlinearity=lb_softplus(.01))
    lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)

    l_24lr_dia_musigma = nn.layers.ReshapeLayer(lddia3musigma,
                                                (-1, 4, image_size, 2))
    l_4ch_left_dia_musigma = nn.layers.SliceLayer(l_24lr_dia_musigma,
                                                  indices=0,
                                                  axis=1)
    l_4ch_right_dia_musigma = nn.layers.SliceLayer(l_24lr_dia_musigma,
                                                   indices=1,
                                                   axis=1)
    l_2ch_left_dia_musigma = nn.layers.SliceLayer(l_24lr_dia_musigma,
                                                  indices=2,
                                                  axis=1)
    l_2ch_right_dia_musigma = nn.layers.SliceLayer(l_24lr_dia_musigma,
                                                   indices=3,
                                                   axis=1)

    l_4ch_dia_musigma = layers.SumGaussLayer(
        [l_4ch_left_dia_musigma, l_4ch_right_dia_musigma])
    l_2ch_dia_musigma = layers.SumGaussLayer(
        [l_2ch_left_dia_musigma, l_2ch_right_dia_musigma])

    l_dia_musigma = layers.IraLayerNoTime(l_4ch_dia_musigma, l_2ch_dia_musigma)

    l_diastole = layers.MuSigmaErfLayer(l_dia_musigma)

    return {
        "inputs": {
            "sliced:data:chanzoom:4ch": l_4ch,
            "sliced:data:chanzoom:2ch": l_2ch,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {},
        "meta_outputs": {}
    }
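
# lb_softplus(lb) is used for every sigma head in these snippets but is not
# defined here. A minimal sketch of what it presumably returns (an assumption,
# not the original code): a softplus nonlinearity shifted up by a lower bound
# lb, so the predicted sigma stays strictly above lb.
def lb_softplus_sketch(lb=1.0):
    def nonlinearity(x):
        return nn.nonlinearities.softplus(x) + lb
    return nonlinearity
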
def build_model(input_layer=None):

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice"]

    if input_layer:
        l0 = input_layer
    else:
        l0 = nn.layers.InputLayer(input_size)
    l0c = dihedral.CyclicSliceLayer(l0)

    l1a = nn.layers.dnn.Conv2DDNNLayer(
        l0c,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=64,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(
        l1a,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=64,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2, 2), stride=(2, 2))
    l1r = dihedral_fast.CyclicConvRollLayer(l1)

    l2a = nn.layers.dnn.Conv2DDNNLayer(
        l1r,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=128,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(
        l2a,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=128,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2, 2), stride=(2, 2))
    l2r = dihedral_fast.CyclicConvRollLayer(l2)

    l3a = nn.layers.dnn.Conv2DDNNLayer(
        l2r,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=256,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(
        l3a,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=256,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(
        l3b,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=256,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2, 2), stride=(2, 2))
    l3r = dihedral_fast.CyclicConvRollLayer(l3)

    l4a = nn.layers.dnn.Conv2DDNNLayer(
        l3r,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=512,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(
        l4a,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=512,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(
        l4b,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        filter_size=(3, 3),
        num_filters=512,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2, 2), stride=(2, 2))
    l4r = dihedral_fast.CyclicConvRollLayer(l4)

    l5a = nn.layers.dnn.Conv2DDNNLayer(
        l4r,
        W=nn.init.Orthogonal("relu"),
        filter_size=(3, 3),
        num_filters=512,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l5b = nn.layers.dnn.Conv2DDNNLayer(
        l5a,
        W=nn.init.Orthogonal("relu"),
        filter_size=(3, 3),
        num_filters=512,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l5c = nn.layers.dnn.Conv2DDNNLayer(
        l5b,
        W=nn.init.Orthogonal("relu"),
        filter_size=(3, 3),
        num_filters=512,
        stride=(1, 1),
        pad="same",
        nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2, 2), stride=(2, 2))
    l5r = dihedral_fast.CyclicConvRollLayer(l5)
    l5f = nn.layers.FlattenLayer(l5r)
    l5m = dihedral.CyclicPoolLayer(l5f, pool_function=rms)
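
    # dihedral.CyclicSliceLayer stacks the four 90-degree rotations of each
    # input along the batch axis, dihedral_fast.CyclicConvRollLayer exchanges
    # feature maps between those rotated copies after each block, and
    # dihedral.CyclicPoolLayer pools the four orientations back together (here
    # with an rms pool), following the cyclic-symmetry scheme of Dieleman et
    # al.; these layers are part of the competition code base and not shown.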

    #    l5drop = nn.layers.dropout(l5m, p=0.5)

    # Systole Dense layers
    ldsys1 = nn.layers.DenseLayer(
        l5m,
        num_units=256,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        nonlinearity=nn.nonlinearities.very_leaky_rectify)

    ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
    ldsys2 = nn.layers.DenseLayer(
        ldsys1drop,
        num_units=512,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        nonlinearity=nn.nonlinearities.very_leaky_rectify)

    ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
    ldsys3mu = nn.layers.DenseLayer(ldsys2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(200.0),
                                    nonlinearity=None)
    ldsys3sigma = nn.layers.DenseLayer(ldsys2drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(100.0),
                                       nonlinearity=lb_softplus(3))
    ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)

    l_systole = layers.MuSigmaErfLayer(ldsys3musigma)

    # Diastole Dense layers
    lddia1 = nn.layers.DenseLayer(
        l5m,
        num_units=256,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        nonlinearity=nn.nonlinearities.very_leaky_rectify)

    lddia1drop = nn.layers.dropout(lddia1, p=0.5)
    lddia2 = nn.layers.DenseLayer(
        lddia1drop,
        num_units=512,
        W=nn.init.Orthogonal("relu"),
        b=nn.init.Constant(0.1),
        nonlinearity=nn.nonlinearities.very_leaky_rectify)

    lddia2drop = nn.layers.dropout(lddia2, p=0.5)
    lddia3mu = nn.layers.DenseLayer(lddia2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(200.0),
                                    nonlinearity=None)
    lddia3sigma = nn.layers.DenseLayer(lddia2drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(100.0),
                                       nonlinearity=lb_softplus(3))
    lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)

    l_diastole = layers.MuSigmaErfLayer(lddia3musigma)

    return {
        "inputs": {
            "sliced:data:singleslice": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
            ldsys1: l2_weight,
            ldsys2: l2_weight,
            ldsys3mu: l2_weight_out,
            ldsys3sigma: l2_weight_out,
            lddia1: l2_weight,
            lddia2: l2_weight,
            lddia3mu: l2_weight_out,
            lddia3sigma: l2_weight_out,
        },
        "meta_outputs": {
            "systole": ldsys2,
            "diastole": lddia2,
        }
    }
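
# layers.MuSigmaErfLayer (used by every model in these snippets) comes from
# the competition code base and is not defined here. A minimal sketch of what
# it appears to do (an assumption, not the original implementation): turn a
# (mu, sigma) pair into a cumulative distribution over the 600 volume bins
# scored by CRPS, using the Gaussian CDF.
import theano.tensor as T


class MuSigmaErfSketchLayer(nn.layers.Layer):
    """Hypothetical stand-in for layers.MuSigmaErfLayer."""

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], 600)

    def get_output_for(self, input, **kwargs):
        mu = input[:, 0].dimshuffle(0, 'x')
        sigma = input[:, 1].dimshuffle(0, 'x')
        x = T.arange(600, dtype='float32').dimshuffle('x', 0)
        # Gaussian CDF evaluated at volumes 0..599
        return (T.erf((x - mu) / (sigma * T.sqrt(2.0))) + 1.0) / 2.0
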
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:sax"]
    input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
    input_size_locations = data_sizes["sliced:data:sax:locations"]

    l0 = nn.layers.InputLayer(input_size)
    lin_slice_mask = nn.layers.InputLayer(input_size_mask)
    lin_slice_locations = nn.layers.InputLayer(input_size_locations)

    # PREPROCESS SLICES SEPARATELY
    # Convolutional layers and some dense layers are defined in a submodel
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))

    import je_ss_jonisc80_leaky_convroll_augzoombright
    submodel = je_ss_jonisc80_leaky_convroll_augzoombright.build_model(
        l0_slices)

    # Systole Dense layers
    l_sys_mu = submodel["meta_outputs"]["systole:mu"]
    l_sys_sigma = submodel["meta_outputs"]["systole:sigma"]
    # Diastole Dense layers
    l_dia_mu = submodel["meta_outputs"]["diastole:mu"]
    l_dia_sigma = submodel["meta_outputs"]["diastole:sigma"]

    # AGGREGATE SLICES PER PATIENT
    l_scaled_slice_locations = layers.TrainableScaleLayer(
        lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)

    # Systole
    l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
    l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
    l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([
        l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask,
        l_scaled_slice_locations
    ],
                                                 rescale_input=100.)

    l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)

    # Diastole
    l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
    l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
    l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([
        l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask,
        l_scaled_slice_locations
    ],
                                                 rescale_input=100.)

    l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)

    submodels = [submodel]
    return {
        "inputs": {
            "sliced:data:sax": l0,
            "sliced:data:sax:is_not_padded": lin_slice_mask,
            "sliced:data:sax:locations": lin_slice_locations,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable":
        dict({}, **{
            k: v
            for d in [
                model["regularizable"] for model in submodels
                if "regularizable" in model
            ] for k, v in d.items()
        }),
        "pretrained": {
            je_ss_jonisc80_leaky_convroll_augzoombright.__name__:
            submodel["outputs"],
        }
    }
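
# layers.JeroenLayer and layers.TrainableScaleLayer are not shown in these
# snippets. From the way they are wired above, JeroenLayer presumably combines
# the per-slice (mu, sigma) pairs with the is_not_padded mask and the scaled
# slice locations into one patient-level volume estimate, i.e. a numerical
# integration of the per-slice areas along the long axis; this is an
# assumption, not the original implementation.
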
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:sax"]
    input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
    input_size_locations = data_sizes["sliced:data:sax:locations"]

    l0 = nn.layers.InputLayer(input_size)
    lin_slice_mask = nn.layers.InputLayer(input_size_mask)
    lin_slice_locations = nn.layers.InputLayer(input_size_locations)

    # PREPROCESS SLICES SEPARATELY
    l0_slices = nn.layers.ReshapeLayer(l0, (batch_size * nr_slices, 30, patch_px, patch_px)) # (bxs, t, i, j)
    subsample_factor = 2
    l0_slices_subsampled = nn.layers.SliceLayer(l0_slices, axis=1, indices=slice(0, 30, subsample_factor))
    nr_frames_subsampled = 30 // subsample_factor  # integer division; used as a reshape dimension below

    # PREPROCESS FRAMES SEPARATELY
    l0_frames = nn.layers.ReshapeLayer(l0_slices_subsampled, (batch_size * nr_slices * nr_frames_subsampled, 1, patch_px, patch_px))  # (bxsxt, 1, i, j)

    l1a = nn.layers.dnn.Conv2DDNNLayer(l0_frames,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=16, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=16, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1c = nn.layers.dnn.Conv2DDNNLayer(l1b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=16, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1c, pool_size=(2,2), stride=(2,2))

    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2c = nn.layers.dnn.Conv2DDNNLayer(l2b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=32, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2c, pool_size=(2,2), stride=(2,2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3d = nn.layers.dnn.Conv2DDNNLayer(l3c, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3d, pool_size=(2,2), stride=(2,2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4d = nn.layers.dnn.Conv2DDNNLayer(l4c, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4d, pool_size=(2,2), stride=(2,2))

    l5a = nn.layers.dnn.Conv2DDNNLayer(l4,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5d = nn.layers.dnn.Conv2DDNNLayer(l5c, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5d, pool_size=(2,2), stride=(2,2))

    l5drop = nn.layers.dropout(l5, p=0.5)
    ld1 = nn.layers.DenseLayer(l5drop, num_units=256, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)

    ld1drop = nn.layers.dropout(ld1, p=0.5)
    ld2 = nn.layers.DenseLayer(ld1drop, num_units=256, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)

    ld2drop = nn.layers.dropout(ld2, p=0.5)

    ld3mu = nn.layers.DenseLayer(ld2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(15.0), nonlinearity=None)
    ld3sigma = nn.layers.DenseLayer(ld2drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(5.0), nonlinearity=lb_softplus(.3))
    ld3musigma = nn.layers.ConcatLayer([ld3mu, ld3sigma], axis=1)

    # Go back to a per-slice model
    l_slices_musigma = nn.layers.ReshapeLayer(ld3musigma, (batch_size * nr_slices, nr_frames_subsampled, 2))  # (bxs, t, 2)
    l_slices_musigma_sys = layers.ArgmaxAndMaxLayer(l_slices_musigma, mode='min')  # (bxs, 2)
    l_slices_musigma_dia = layers.ArgmaxAndMaxLayer(l_slices_musigma, mode='max')  # (bxs, 2)

    # AGGREGATE SLICES PER PATIENT
    l_scaled_slice_locations = layers.TrainableScaleLayer(lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)

    # Systole
    l_pat_sys_ss_musigma = nn.layers.ReshapeLayer(l_slices_musigma_sys, (batch_size, nr_slices, 2))
    l_pat_sys_ss_mu = nn.layers.SliceLayer(l_pat_sys_ss_musigma, indices=0, axis=-1)
    l_pat_sys_ss_sigma = nn.layers.SliceLayer(l_pat_sys_ss_musigma, indices=1, axis=-1)
    l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)

    l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)

    # Diastole
    l_pat_dia_ss_musigma = nn.layers.ReshapeLayer(l_slices_musigma_dia, (batch_size, nr_slices, 2))
    l_pat_dia_ss_mu = nn.layers.SliceLayer(l_pat_dia_ss_musigma, indices=0, axis=-1)
    l_pat_dia_ss_sigma = nn.layers.SliceLayer(l_pat_dia_ss_musigma, indices=1, axis=-1)
    l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations], rescale_input=1.)

    l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)


    return {
        "inputs":{
            "sliced:data:sax": l0,
            "sliced:data:sax:is_not_padded": lin_slice_mask,
            "sliced:data:sax:locations": lin_slice_locations,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
        },
    }
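
# layers.ArgmaxAndMaxLayer is another custom layer that is not defined here.
# A hedged sketch of what mode='min' / mode='max' appear to do in this model:
# per slice, select the frame whose predicted mu is smallest (end-systole) or
# largest (end-diastole) and return that frame's (mu, sigma) pair.
import theano.tensor as T


class ArgmaxAndMaxSketchLayer(nn.layers.Layer):
    """Hypothetical stand-in for layers.ArgmaxAndMaxLayer."""

    def __init__(self, incoming, mode='max', **kwargs):
        super(ArgmaxAndMaxSketchLayer, self).__init__(incoming, **kwargs)
        self.mode = mode

    def get_output_shape_for(self, input_shape):
        # (batch*slices, frames, 2) -> (batch*slices, 2)
        return (input_shape[0], input_shape[2])

    def get_output_for(self, input, **kwargs):
        mu = input[:, :, 0]
        idx = T.argmin(mu, axis=1) if self.mode == 'min' else T.argmax(mu, axis=1)
        return input[T.arange(input.shape[0]), idx]
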
def build_model(input_layer=None):

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice"]

    if input_layer:
        l0 = input_layer
    else:
        l0 = nn.layers.InputLayer(input_size)

    # Reshape so that each frame becomes a separate example (frame model)
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, 1, [2], [3]))

    l1a = nn.layers.dnn.Conv2DDNNLayer(l0_slices,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=16,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=16,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2, 2), stride=(2, 2))

    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=32,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=32,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2, 2), stride=(2, 2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2, 2), stride=(2, 2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2, 2), stride=(2, 2))

    l5a = nn.layers.dnn.Conv2DDNNLayer(l4,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5b = nn.layers.dnn.Conv2DDNNLayer(l5a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5c = nn.layers.dnn.Conv2DDNNLayer(l5b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2, 2), stride=(2, 2))

    l5drop = nn.layers.dropout(l5, p=0.0)
    ld1 = nn.layers.DenseLayer(l5drop,
                               num_units=512,
                               W=nn.init.Orthogonal("relu"),
                               b=nn.init.Constant(0.1),
                               nonlinearity=nn.nonlinearities.rectify)

    ld1drop = nn.layers.dropout(ld1, p=0.0)
    ld2 = nn.layers.DenseLayer(ld1drop,
                               num_units=512,
                               W=nn.init.Orthogonal("relu"),
                               b=nn.init.Constant(0.1),
                               nonlinearity=nn.nonlinearities.rectify)

    ld2drop = nn.layers.dropout(ld2, p=0.0)

    ld3mu = nn.layers.DenseLayer(ld2drop,
                                 num_units=1,
                                 W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(200.0),
                                 nonlinearity=None)
    ld3sigma = nn.layers.DenseLayer(ld2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(50.0),
                                    nonlinearity=lb_softplus(3))
    ld3musigma = nn.layers.ConcatLayer([ld3mu, ld3sigma], axis=1)

    # Reshape back so that frames are grouped per slice again (slice model)
    ld3musigma_slices = nn.layers.ReshapeLayer(ld3musigma, (-1, NR_FRAMES, 2))

    l_systole_musigma = layers.ArgmaxAndMaxLayer(ld3musigma_slices, 'min')
    l_systole = layers.MuSigmaErfLayer(l_systole_musigma)

    l_diastole_musigma = layers.ArgmaxAndMaxLayer(ld3musigma_slices, 'max')
    l_diastole = layers.MuSigmaErfLayer(l_diastole_musigma)

    return {
        "inputs": {
            "sliced:data:singleslice": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
            ld1: l2_weight,
            ld2: l2_weight,
            ld3mu: l2_weight_out,
            ld3sigma: l2_weight_out,
        }
    }
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:randomslices"]

    l0 = nn.layers.InputLayer(input_size)

    # PREPROCESS SLICES SEPARATELY
    # Convolutional layers and some dense layers are defined in a submodel
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))

    import je_ss_jonisc64small_360
    submodel = je_ss_jonisc64small_360.build_model(l0_slices)

    # Systole Dense layers
    ldsys2 = submodel["meta_outputs"]["systole"]
    # Diastole Dense layers
    lddia2 = submodel["meta_outputs"]["diastole"]

    # AGGREGATE SLICES PER PATIENT
    # Systole
    ldsys_pat_in = nn.layers.ReshapeLayer(ldsys2, (-1, nr_slices, [1]))

    input_gate_sys = nn.layers.Gate(W_in=nn.init.GlorotUniform(),
                                    W_hid=nn.init.Orthogonal())
    forget_gate_sys = nn.layers.Gate(W_in=nn.init.GlorotUniform(),
                                     W_hid=nn.init.Orthogonal(),
                                     b=nn.init.Constant(5.0))
    output_gate_sys = nn.layers.Gate(W_in=nn.init.GlorotUniform(),
                                     W_hid=nn.init.Orthogonal())
    cell_sys = nn.layers.Gate(W_in=nn.init.GlorotUniform(),
                              W_hid=nn.init.Orthogonal(),
                              W_cell=None,
                              nonlinearity=nn.nonlinearities.tanh)

    ldsys_lstm = nn.layers.LSTMLayer(
        ldsys_pat_in,
        num_units=256,
        ingate=input_gate_sys,
        forgetgate=forget_gate_sys,
        cell=cell_sys,
        outgate=output_gate_sys,
        peepholes=False,
        precompute_input=False,
        grad_clipping=5,
        only_return_final=True,
        learn_init=True,
    )

    ldsys_lstm_drop = nn.layers.dropout(ldsys_lstm, p=0.5)

    ldsys3mu = nn.layers.DenseLayer(ldsys_lstm_drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(200.0),
                                    nonlinearity=None)
    ldsys3sigma = nn.layers.DenseLayer(ldsys_lstm_drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(100.0),
                                       nonlinearity=lb_softplus(3))
    ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)

    l_systole = layers.MuSigmaErfLayer(ldsys3musigma)

    # Diastole
    lddia_pat_in = nn.layers.ReshapeLayer(lddia2, (-1, nr_slices, [1]))

    input_gate_dia = nn.layers.Gate(W_in=nn.init.GlorotUniform(),
                                    W_hid=nn.init.Orthogonal())
    forget_gate_dia = nn.layers.Gate(W_in=nn.init.GlorotUniform(),
                                     W_hid=nn.init.Orthogonal(),
                                     b=nn.init.Constant(5.0))
    output_gate_dia = nn.layers.Gate(W_in=nn.init.GlorotUniform(),
                                     W_hid=nn.init.Orthogonal())
    cell_dia = nn.layers.Gate(W_in=nn.init.GlorotUniform(),
                              W_hid=nn.init.Orthogonal(),
                              W_cell=None,
                              nonlinearity=nn.nonlinearities.tanh)

    lddia_lstm = nn.layers.LSTMLayer(
        lddia_pat_in,
        num_units=256,
        ingate=input_gate_dia,
        forgetgate=forget_gate_dia,
        cell=cell_dia,
        outgate=output_gate_dia,
        peepholes=False,
        precompute_input=False,
        grad_clipping=5,
        only_return_final=True,
        learn_init=True,
    )

    lddia_lstm_drop = nn.layers.dropout(lddia_lstm, p=0.5)

    lddia3mu = nn.layers.DenseLayer(lddia_lstm_drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(200.0),
                                    nonlinearity=None)
    lddia3sigma = nn.layers.DenseLayer(lddia_lstm_drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(100.0),
                                       nonlinearity=lb_softplus(3))
    lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)

    l_diastole = layers.MuSigmaErfLayer(lddia3musigma)

    submodels = [submodel]
    return {
        "inputs": {
            "sliced:data:randomslices": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable":
        dict(
            {
                ldsys3mu: l2_weight_out,
                ldsys3sigma: l2_weight_out,
                lddia3mu: l2_weight_out,
                lddia3sigma: l2_weight_out,
            }, **{
                k: v
                for d in [
                    model["regularizable"] for model in submodels
                    if "regularizable" in model
                ] for k, v in d.items()
            }),
        "pretrained": {
            je_ss_jonisc64small_360.__name__: submodel["outputs"],
        }
    }
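
# Design note on the LSTM aggregation above: the submodel's per-slice feature
# vectors (its "meta_outputs") are reshaped to (batch, nr_slices, features)
# and summarised by an LSTM with only_return_final=True, so the patient-level
# mu/sigma heads see a fixed-size code regardless of the number of slices.
# The forget-gate bias of 5.0 starts that gate almost fully open
# (sigmoid(5) ~ 0.99), which helps the LSTM retain slice information early in
# training.
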
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice"]

    l0 = nn.layers.InputLayer(input_size)

    l1a = nn.layers.dnn.Conv2DDNNLayer(l0,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2, 2), stride=(2, 2))

    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2, 2), stride=(2, 2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2, 2), stride=(2, 2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2, 2), stride=(2, 2))

    # Systole Dense layers
    ldsys1 = nn.layers.DenseLayer(l4,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
    ldsys2 = nn.layers.DenseLayer(ldsys1drop,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
    ldsys3mu = nn.layers.DenseLayer(ldsys2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(200.0),
                                    nonlinearity=None)
    ldsys3sigma = nn.layers.DenseLayer(ldsys2drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(50.0),
                                       nonlinearity=lb_softplus(3))
    ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)

    l_systole = layers.MuSigmaErfLayer(ldsys3musigma)

    # Diastole Dense layers
    lddia1 = nn.layers.DenseLayer(l4,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    lddia1drop = nn.layers.dropout(lddia1, p=0.5)
    lddia2 = nn.layers.DenseLayer(lddia1drop,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    lddia2drop = nn.layers.dropout(lddia2, p=0.5)
    lddia3mu = nn.layers.DenseLayer(lddia2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(200.0),
                                    nonlinearity=None)
    lddia3sigma = nn.layers.DenseLayer(lddia2drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(50.0),
                                       nonlinearity=lb_softplus(3))
    lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)

    l_diastole = layers.MuSigmaErfLayer(lddia3musigma)

    return {
        "inputs": {
            "sliced:data:singleslice": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
            ldsys1: l2_weight,
            ldsys2: l2_weight,
            ldsys3mu: l2_weight_out,
            ldsys3sigma: l2_weight_out,
            lddia1: l2_weight,
            lddia2: l2_weight,
            lddia3mu: l2_weight_out,
            lddia3sigma: l2_weight_out,
        },
    }
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:sax"]
    input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
    input_size_locations = data_sizes["sliced:data:sax:locations"]

    l0 = nn.layers.InputLayer(input_size)
    lin_slice_mask = nn.layers.InputLayer(input_size_mask)
    lin_slice_locations = nn.layers.InputLayer(input_size_locations)

    # PREPROCESS SLICES SEPARATELY
    l0_slices = nn.layers.ReshapeLayer(
        l0, (batch_size * nr_slices, 30, patch_px, patch_px))  # (bxs, t, i, j)
    subsample_factor = 2
    l0_slices_subsampled = nn.layers.SliceLayer(l0_slices,
                                                axis=1,
                                                indices=slice(
                                                    0, 30, subsample_factor))
    nr_frames_subsampled = 30 // subsample_factor  # integer division; used as a reshape dimension below

    # PREPROCESS FRAMES SEPARATELY
    l0_frames = nn.layers.ReshapeLayer(
        l0_slices_subsampled, (batch_size * nr_slices * nr_frames_subsampled,
                               1, patch_px, patch_px))  # (bxsxt, 1, i, j)

    # Multiscale pyramid: average-pool to go down a scale, Upscale2DLayer to go back up
    downsample = lambda incoming: nn.layers.dnn.Pool2DDNNLayer(
        incoming, pool_size=(2, 2), stride=(2, 2), mode='average_inc_pad')
    upsample = lambda incoming: nn.layers.Upscale2DLayer(incoming,
                                                         scale_factor=2)
    l0_frames_d0 = l0_frames
    l0_frames_d1 = downsample(l0_frames_d0)
    l0_frames_d2 = downsample(l0_frames_d1)
    l0_frames_d3 = downsample(l0_frames_d2)
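
    # Coarse-to-fine processing: the coarsest scale (d3) is convolved first,
    # and at every finer scale the raw frames at that resolution are
    # concatenated with the upsampled output of the coarser scale before the
    # next stack of convolutions.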

    ld3a = nn.layers.dnn.Conv2DDNNLayer(l0_frames_d3,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(3, 3),
                                        num_filters=16,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld3b = nn.layers.dnn.Conv2DDNNLayer(ld3a,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(3, 3),
                                        num_filters=16,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld3c = nn.layers.dnn.Conv2DDNNLayer(ld3b,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(3, 3),
                                        num_filters=16,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld3o = nn.layers.dnn.Conv2DDNNLayer(ld3c,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(3, 3),
                                        num_filters=16,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)

    ld2i = nn.layers.ConcatLayer([l0_frames_d2, upsample(ld3o)], axis=1)
    ld2a = nn.layers.dnn.Conv2DDNNLayer(ld2i,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld2b = nn.layers.dnn.Conv2DDNNLayer(ld2a,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld2c = nn.layers.dnn.Conv2DDNNLayer(ld2b,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld2d = nn.layers.dnn.Conv2DDNNLayer(ld2c,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld2o = nn.layers.dnn.Conv2DDNNLayer(ld2d,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)

    ld1i = nn.layers.ConcatLayer([l0_frames_d1, upsample(ld2o)], axis=1)
    ld1a = nn.layers.dnn.Conv2DDNNLayer(ld1i,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld1b = nn.layers.dnn.Conv2DDNNLayer(ld1a,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld1c = nn.layers.dnn.Conv2DDNNLayer(ld1b,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld1d = nn.layers.dnn.Conv2DDNNLayer(ld1c,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld1o = nn.layers.dnn.Conv2DDNNLayer(ld1d,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)

    ld0i = nn.layers.ConcatLayer([l0_frames_d0, upsample(ld1o)], axis=1)
    ld0a = nn.layers.dnn.Conv2DDNNLayer(ld0i,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld0b = nn.layers.dnn.Conv2DDNNLayer(ld0a,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld0c = nn.layers.dnn.Conv2DDNNLayer(ld0b,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld0d = nn.layers.dnn.Conv2DDNNLayer(ld0c,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=32,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.rectify)
    ld0o = nn.layers.dnn.Conv2DDNNLayer(ld0d,
                                        W=nn.init.Orthogonal("relu"),
                                        filter_size=(5, 5),
                                        num_filters=1,
                                        stride=(1, 1),
                                        pad="same",
                                        nonlinearity=nn.nonlinearities.sigmoid)
    ld0r = nn.layers.ReshapeLayer(
        ld0o,
        (batch_size * nr_slices * nr_frames_subsampled, patch_px, patch_px))

    l_frames_musigma = layers.IntegrateAreaLayer(ld0r,
                                                 sigma_mode='scale',
                                                 sigma_scale=.1)
    area_per_pixel_cm = (float(patch_mm) / float(patch_px))**2 / 100.0
    l_frames_musigma_cm = layers.TrainableScaleLayer(
        l_frames_musigma,
        scale=nn.init.Constant(area_per_pixel_cm),
        trainable=False)

    # Go back to a per slice model
    l_slices_musigma_cm = nn.layers.ReshapeLayer(
        l_frames_musigma_cm,
        (batch_size * nr_slices, nr_frames_subsampled, 2))  # (bxs, t, 2)
    l_slices_musigma_cm_sys = layers.ArgmaxAndMaxLayer(l_slices_musigma_cm,
                                                       mode='min')  # (bxs, 2)
    l_slices_musigma_cm_dia = layers.ArgmaxAndMaxLayer(l_slices_musigma_cm,
                                                       mode='max')  # (bxs, 2)
    l_slices_musigma_cm_avg = layers.ArgmaxAndMaxLayer(l_slices_musigma_cm,
                                                       mode='mean')

    # AGGREGATE SLICES PER PATIENT
    l_scaled_slice_locations = layers.TrainableScaleLayer(
        lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)

    # Systole
    l_pat_sys_ss_musigma_cm = nn.layers.ReshapeLayer(
        l_slices_musigma_cm_sys, (batch_size, nr_slices, 2))
    l_pat_sys_ss_mu_cm = nn.layers.SliceLayer(l_pat_sys_ss_musigma_cm,
                                              indices=0,
                                              axis=-1)
    l_pat_sys_ss_sigma_cm = nn.layers.SliceLayer(l_pat_sys_ss_musigma_cm,
                                                 indices=1,
                                                 axis=-1)
    l_pat_sys_ss_sigma_cm = layers.TrainableScaleLayer(
        l_pat_sys_ss_sigma_cm)
    l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([
        l_pat_sys_ss_mu_cm, l_pat_sys_ss_sigma_cm, lin_slice_mask,
        l_scaled_slice_locations
    ],
                                                 rescale_input=1.)

    l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)

    # Diastole
    l_pat_dia_ss_musigma_cm = nn.layers.ReshapeLayer(
        l_slices_musigma_cm_dia, (batch_size, nr_slices, 2))
    l_pat_dia_ss_mu_cm = nn.layers.SliceLayer(l_pat_dia_ss_musigma_cm,
                                              indices=0,
                                              axis=-1)
    l_pat_dia_ss_sigma_cm = nn.layers.SliceLayer(l_pat_dia_ss_musigma_cm,
                                                 indices=1,
                                                 axis=-1)
    l_pat_dia_ss_sigma_cm = layers.TrainableScaleLayer(
        l_pat_dia_ss_sigma_cm)
    l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([
        l_pat_dia_ss_mu_cm, l_pat_dia_ss_sigma_cm, lin_slice_mask,
        l_scaled_slice_locations
    ],
                                                 rescale_input=1.)

    l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)

    # Average
    l_pat_avg_ss_musigma_cm = nn.layers.ReshapeLayer(
        l_slices_musigma_cm_avg, (batch_size, nr_slices, 2))
    l_pat_avg_ss_mu_cm = nn.layers.SliceLayer(l_pat_avg_ss_musigma_cm,
                                              indices=0,
                                              axis=-1)
    l_pat_avg_ss_sigma_cm = nn.layers.SliceLayer(l_pat_avg_ss_musigma_cm,
                                                 indices=1,
                                                 axis=-1)
    l_pat_avg_aggr_mu_sigma = layers.JeroenLayer([
        l_pat_avg_ss_mu_cm, l_pat_avg_ss_sigma_cm, lin_slice_mask,
        l_scaled_slice_locations
    ],
                                                 rescale_input=1.)

    l_mean = layers.MuSigmaErfLayer(l_pat_avg_aggr_mu_sigma)

    return {
        "inputs": {
            "sliced:data:sax": l0,
            "sliced:data:sax:is_not_padded": lin_slice_mask,
            "sliced:data:sax:locations": lin_slice_locations,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
            "average": l_mean,
        },
        "regularizable": {},
    }
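

# The layers.* classes and the upsample() helper used above come from the
# project's own layer library and are not defined in this file. The sketches
# below only illustrate their assumed behaviour; names suffixed with "Sketch"
# are hypothetical, not the authors' implementations.
import theano.tensor as T
import lasagne as nn


def upsample_sketch(incoming):
    # Assumed: nearest-neighbour 2x upsampling of the decoder feature maps.
    return nn.layers.Upscale2DLayer(incoming, scale_factor=2)


class IntegrateAreaLayerSketch(nn.layers.Layer):
    """Assumed: reduce an (N, H, W) sigmoid segmentation map to (N, 2), where
    column 0 is the segmented area in pixels (mu) and column 1 a sigma taken
    as a fixed fraction of mu when sigma_mode='scale' (only that mode is
    sketched here)."""

    def __init__(self, incoming, sigma_mode='scale', sigma_scale=.1, **kwargs):
        super(IntegrateAreaLayerSketch, self).__init__(incoming, **kwargs)
        self.sigma_scale = sigma_scale

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], 2)

    def get_output_for(self, input, **kwargs):
        mu = T.sum(input, axis=(1, 2))       # integrate the mask: area in px
        sigma = self.sigma_scale * mu        # 'scale' mode: sigma proportional to mu
        return T.stack([mu, sigma], axis=1)


class TrainableScaleLayerSketch(nn.layers.Layer):
    """Assumed: multiply the input by one scalar parameter; with
    trainable=False it is a fixed rescaling (e.g. pixel area -> cm^2)."""

    def __init__(self, incoming, scale=nn.init.Constant(1.), trainable=True,
                 **kwargs):
        super(TrainableScaleLayerSketch, self).__init__(incoming, **kwargs)
        self.scale = self.add_param(scale, (), name="scale",
                                    trainable=trainable)

    def get_output_for(self, input, **kwargs):
        return input * self.scale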


def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:randomslices"]

    l0 = nn.layers.InputLayer(input_size)

    # PREPROCESS SLICES SEPARATELY
    # Convolutional layers
    batch_norm = nn.layers.normalization.batch_norm
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
    l1a = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l0_slices,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=64,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l1b = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l1a,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=64,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2, 2), stride=(2, 2))

    l2a = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l1,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=128,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l2b = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l2a,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=128,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2, 2), stride=(2, 2))

    l3a = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l2,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=256,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l3b = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l3a,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=256,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l3c = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l3b,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=256,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2, 2), stride=(2, 2))

    l4a = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l3,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=512,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l4b = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l4a,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=512,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l4c = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l4b,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=512,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2, 2), stride=(2, 2))

    l5a = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l4,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=512,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l5b = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l5a,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=512,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l5c = batch_norm(
        nn.layers.dnn.Conv2DDNNLayer(l5b,
                                     W=nn.init.Orthogonal("relu"),
                                     filter_size=(3, 3),
                                     num_filters=512,
                                     stride=(1, 1),
                                     pad="same",
                                     nonlinearity=nn.nonlinearities.rectify))
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2, 2), stride=(2, 2))

    # Systole Dense layers
    ldsys1 = nn.layers.DenseLayer(l5,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
    ldsys2 = nn.layers.DenseLayer(ldsys1drop,
                                  num_units=256,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    # Diastole Dense layers
    lddia1 = nn.layers.DenseLayer(l5,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    lddia1drop = nn.layers.dropout(lddia1, p=0.5)
    lddia2 = nn.layers.DenseLayer(lddia1drop,
                                  num_units=256,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    # AGGREGATE SLICES PER PATIENT
    # Systole
    ldsys_pat_in = nn.layers.ReshapeLayer(ldsys2, (-1, nr_slices, [1]))

    ldsys_lstm = rnn_layer(ldsys_pat_in, num_units=256)

    ldsys_lstm_drop = nn.layers.dropout(ldsys_lstm, p=0.5)

    ldsys3mu = nn.layers.DenseLayer(ldsys_lstm_drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(200.0),
                                    nonlinearity=None)
    ldsys3sigma = nn.layers.DenseLayer(ldsys_lstm_drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(100.0),
                                       nonlinearity=lb_softplus(3))
    ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)

    l_systole = layers.MuSigmaErfLayer(ldsys3musigma)

    # Diastole
    lddia_pat_in = nn.layers.ReshapeLayer(lddia2, (-1, nr_slices, [1]))

    lddia_lstm = rnn_layer(lddia_pat_in, num_units=256)

    lddia_lstm_drop = nn.layers.dropout(lddia_lstm, p=0.5)

    lddia3mu = nn.layers.DenseLayer(lddia_lstm_drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(200.0),
                                    nonlinearity=None)
    lddia3sigma = nn.layers.DenseLayer(lddia_lstm_drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(100.0),
                                       nonlinearity=lb_softplus(3))
    lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)

    l_diastole = layers.MuSigmaErfLayer(lddia3musigma)

    return {
        "inputs": {
            "sliced:data:randomslices": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
            ldsys1: l2_weight,
            ldsys2: l2_weight,
            ldsys3mu: l2_weight_out,
            ldsys3sigma: l2_weight_out,
            lddia1: l2_weight,
            lddia2: l2_weight,
            lddia3mu: l2_weight_out,
            lddia3sigma: l2_weight_out,
        },
    }
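

# lb_softplus and rnn_layer are helpers from the project's utility code and
# are not defined in this file. Assumed behaviour only (a sketch, not the
# original code): lb_softplus(lb) builds a softplus nonlinearity with a lower
# bound lb so the predicted sigma cannot collapse to zero, and rnn_layer wraps
# a recurrent layer over the slice axis that returns only its final state.
import lasagne as nn


def lb_softplus_sketch(lb=1.0):
    return lambda x: nn.nonlinearities.softplus(x) + lb


def rnn_layer_sketch(incoming, num_units):
    return nn.layers.GRULayer(incoming, num_units=num_units,
                              only_return_final=True)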


# Example 10
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:randomslices"]

    l0 = nn.layers.InputLayer(input_size)

    # PREPROCESS SLICES SEPERATELY
    # Convolutional layers and some dense layers are defined in a submodel
    # Convolutional layers and some dense layers are defined in a submodel
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))

    from . import je_ss_jonisc64small_360
    submodel = je_ss_jonisc64small_360.build_model(l0_slices)

    # Systole Dense layers
    ldsys2 = submodel["meta_outputs"]["systole"]
    # Diastole Dense layers
    lddia2 = submodel["meta_outputs"]["diastole"]

    # AGGREGATE SLICES PER PATIENT
    # Systole
    ldsys_pat_in = nn.layers.ReshapeLayer(ldsys2, (-1, nr_slices, [1]))

    ldsys_rnn = rnn_layer(ldsys_pat_in, num_units=256)
 
#    ldsys_rnn_drop = nn.layers.dropout(ldsys_rnn, p=0.5)

    ldsys3mu = nn.layers.DenseLayer(ldsys_rnn, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(200.0), nonlinearity=None)
    ldsys3sigma = nn.layers.DenseLayer(ldsys_rnn, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(100.0), nonlinearity=lb_softplus(3))
    ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)

    l_systole = layers.MuSigmaErfLayer(ldsys3musigma)

    # Diastole
    lddia_pat_in = nn.layers.ReshapeLayer(lddia2, (-1, nr_slices, [1]))

    lddia_rnn = rnn_layer(lddia_pat_in, num_units=256)
 
#    lddia_rnn_drop = nn.layers.dropout(lddia_rnn, p=0.5)

    lddia3mu = nn.layers.DenseLayer(lddia_rnn, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(200.0), nonlinearity=None)
    lddia3sigma = nn.layers.DenseLayer(lddia_rnn, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(100.0), nonlinearity=lb_softplus(3))
    lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)

    l_diastole = layers.MuSigmaErfLayer(lddia3musigma)

    submodels = [submodel]
    return {
        "inputs":{
            "sliced:data:randomslices": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": dict(
            {
            ldsys3mu: l2_weight_out,
            ldsys3sigma: l2_weight_out,
            lddia3mu: l2_weight_out,
            lddia3sigma: l2_weight_out,},
            **{
                k: v
                for d in [model["regularizable"] for model in submodels if "regularizable" in model]
                for k, v in list(d.items()) }
        ),
#        "pretrained":{
#            je_ss_jonisc64small_360.__name__: submodel["outputs"],
#        }
    }
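

# layers.MuSigmaErfLayer is the output head shared by the models in this file.
# Sketch of the assumed behaviour only (not the project's implementation): it
# maps a (batch, 2) tensor of (mu, sigma) volumes into a per-bin cumulative
# distribution via the Gaussian CDF 0.5 * (1 + erf((v - mu) / (sigma * sqrt(2)))),
# with 600 bins assumed here.
import numpy as np
import theano.tensor as T
import lasagne as nn


class MuSigmaErfLayerSketch(nn.layers.Layer):
    def __init__(self, incoming, nr_bins=600, **kwargs):
        super(MuSigmaErfLayerSketch, self).__init__(incoming, **kwargs)
        self.bins = np.arange(nr_bins).astype('float32')

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], len(self.bins))

    def get_output_for(self, input, **kwargs):
        mu = input[:, 0].dimshuffle(0, 'x')
        sigma = input[:, 1].dimshuffle(0, 'x')
        z = (self.bins.reshape((1, -1)) - mu) / (sigma * np.float32(np.sqrt(2)))
        return T.clip(0.5 * (1.0 + T.erf(z)), 0.0, 1.0)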


# Example 11
def build_model(input_layer=None):

    #################
    # Regular model #
    #################

    l_4ch = nn.layers.InputLayer(data_sizes["sliced:data:chanzoom:4ch"])
    l_2ch = nn.layers.InputLayer(data_sizes["sliced:data:chanzoom:2ch"])

    # Add an axis to concatenate over later
    l_4chr = nn.layers.ReshapeLayer(l_4ch, (
        batch_size,
        1,
    ) + l_4ch.output_shape[1:])
    l_2chr = nn.layers.ReshapeLayer(l_2ch, (
        batch_size,
        1,
    ) + l_2ch.output_shape[1:])

    # Cut the images in half, flip the left ones
    l_4ch_left = nn.layers.SliceLayer(l_4chr,
                                      indices=slice(image_size // 2 - 1, None,
                                                    -1),
                                      axis=-1)
    l_4ch_right = nn.layers.SliceLayer(l_4chr,
                                       indices=slice(image_size // 2, None, 1),
                                       axis=-1)
    l_2ch_left = nn.layers.SliceLayer(l_2chr,
                                      indices=slice(image_size // 2 - 1, None,
                                                    -1),
                                      axis=-1)
    l_2ch_right = nn.layers.SliceLayer(l_2chr,
                                       indices=slice(image_size // 2, None, 1),
                                       axis=-1)

    # Concatenate over second axis
    l_24lr = nn.layers.ConcatLayer(
        [l_4ch_left, l_4ch_right, l_2ch_left, l_2ch_right], axis=1)
    # b, 4, t, h, w

    # Subsample frames
    SUBSAMPLING_FACTOR = 2
    nr_subsampled_frames = nr_frames // SUBSAMPLING_FACTOR
    l_24lr_ss = nn.layers.SliceLayer(l_24lr,
                                     indices=slice(None, None,
                                                   SUBSAMPLING_FACTOR),
                                     axis=2)

    # Move frames and halves to batch, process them all in the same way, add channel axis
    l_halves = nn.layers.ReshapeLayer(l_24lr_ss,
                                      (batch_size * 4 * nr_subsampled_frames,
                                       1, image_size, image_size // 2))

    # First, do some convolutions in all directions
    l1a = nn.layers.dnn.Conv2DDNNLayer(l_halves,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=16,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=16,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(1, 2), stride=(1, 2))

    # Then, only use the last axis
    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=32,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=32,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(1, 2), stride=(1, 2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(1, 2), stride=(1, 2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(1, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(1, 2), stride=(1, 2))

    # Now, process each row separately, by swapping the channel and height axes and then folding the height axis into the batch
    l4shuffle = nn.layers.DimshuffleLayer(l4, pattern=(0, 2, 1, 3))
    l4rows = nn.layers.ReshapeLayer(
        l4shuffle, (batch_size * 4 * nr_subsampled_frames * image_size,
                    l4shuffle.output_shape[-2], l4shuffle.output_shape[-1]))

    # Systole
    ld1 = nn.layers.DenseLayer(l4rows,
                               num_units=256,
                               W=nn.init.Orthogonal("relu"),
                               b=nn.init.Constant(0.1),
                               nonlinearity=nn.nonlinearities.rectify)

    ld1drop = nn.layers.dropout(ld1, p=0.5)
    ld2 = nn.layers.DenseLayer(ld1drop,
                               num_units=256,
                               W=nn.init.Orthogonal("relu"),
                               b=nn.init.Constant(0.1),
                               nonlinearity=nn.nonlinearities.rectify)

    ld2drop = nn.layers.dropout(ld2, p=0.5)
    ld3mu = nn.layers.DenseLayer(ld2drop,
                                 num_units=1,
                                 W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(16.0),
                                 nonlinearity=None)
    ld3sigma = nn.layers.DenseLayer(ld2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(4.0),
                                    nonlinearity=lb_softplus(.01))
    ld3musigma = nn.layers.ConcatLayer([ld3mu, ld3sigma], axis=1)

    # Get the four halves back
    l_24lr_musigma = nn.layers.ReshapeLayer(
        ld3musigma, (batch_size, 4, nr_subsampled_frames, image_size, 2))
    l_24lr_musigma_shuffle = nn.layers.DimshuffleLayer(l_24lr_musigma,
                                                       pattern=(0, 2, 1, 3, 4))
    l_24lr_musigma_re = nn.layers.ReshapeLayer(
        l_24lr_musigma_shuffle,
        (batch_size * nr_subsampled_frames, 4, image_size, 2))

    l_4ch_left_musigma = nn.layers.SliceLayer(l_24lr_musigma_re,
                                              indices=0,
                                              axis=1)
    l_4ch_right_musigma = nn.layers.SliceLayer(l_24lr_musigma_re,
                                               indices=1,
                                               axis=1)
    l_2ch_left_musigma = nn.layers.SliceLayer(l_24lr_musigma_re,
                                              indices=2,
                                              axis=1)
    l_2ch_right_musigma = nn.layers.SliceLayer(l_24lr_musigma_re,
                                               indices=3,
                                               axis=1)

    l_4ch_musigma = layers.SumGaussLayer(
        [l_4ch_left_musigma, l_4ch_right_musigma])
    l_2ch_musigma = layers.SumGaussLayer(
        [l_2ch_left_musigma, l_2ch_right_musigma])

    l_musigma_frames = layers.IraLayerNoTime(l_4ch_musigma, l_2ch_musigma)

    # Minmax over time
    print(l_musigma_frames.output_shape)
    l_musigmas = nn.layers.ReshapeLayer(l_musigma_frames,
                                        (-1, nr_subsampled_frames, 2))
    l_musigma_sys = layers.ArgmaxAndMaxLayer(l_musigmas, mode='min')
    l_musigma_dia = layers.ArgmaxAndMaxLayer(l_musigmas, mode='max')

    l_systole = layers.MuSigmaErfLayer(l_musigma_sys)
    l_diastole = layers.MuSigmaErfLayer(l_musigma_dia)

    return {
        "inputs": {
            "sliced:data:chanzoom:4ch": l_4ch,
            "sliced:data:chanzoom:2ch": l_2ch,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {},
        "meta_outputs": {}
    }
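

# layers.SumGaussLayer and layers.ArgmaxAndMaxLayer above are project layers,
# not defined here. Sketches of the assumed behaviour only: SumGaussLayer adds
# two independent Gaussian area estimates elementwise (means add, variances
# add), and ArgmaxAndMaxLayer selects the minimum (systole), maximum
# (diastole) or mean of the per-frame (mu, sigma) pairs along the time axis.
import theano.tensor as T
import lasagne as nn


class SumGaussLayerSketch(nn.layers.MergeLayer):
    # Assumes both inputs have shape (N, rows, 2) holding (mu, sigma).
    def get_output_shape_for(self, input_shapes):
        return input_shapes[0]

    def get_output_for(self, inputs, **kwargs):
        mu = inputs[0][:, :, 0] + inputs[1][:, :, 0]
        sigma = T.sqrt(inputs[0][:, :, 1] ** 2 + inputs[1][:, :, 1] ** 2)
        return T.stack([mu, sigma], axis=2)


class ArgmaxAndMaxLayerSketch(nn.layers.Layer):
    # Assumes input of shape (batch, time, 2) with (mu, sigma) per frame.
    def __init__(self, incoming, mode='max', **kwargs):
        super(ArgmaxAndMaxLayerSketch, self).__init__(incoming, **kwargs)
        self.mode = mode

    def get_output_shape_for(self, input_shape):
        return (input_shape[0], input_shape[2])

    def get_output_for(self, input, **kwargs):
        if self.mode == 'mean':
            return T.mean(input, axis=1)
        mu = input[:, :, 0]
        idx = T.argmin(mu, axis=1) if self.mode == 'min' else T.argmax(mu, axis=1)
        return input[T.arange(input.shape[0]), idx]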


# Example 12
def build_model(input_layer=None):

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:singleslice"]

    if input_layer:
        l0 = input_layer
    else:
        l0 = nn.layers.InputLayer(input_size)

    # Subsample frames
    subsample_factor = 2
    l0_slices_subsampled = nn.layers.SliceLayer(l0,
                                                axis=1,
                                                indices=slice(
                                                    0, NR_FRAMES,
                                                    subsample_factor))
    nr_frames_subsampled = NR_FRAMES // subsample_factor

    # Reshape to framemodel
    l0_frames = nn.layers.ReshapeLayer(l0_slices_subsampled, (-1, 1, [2], [3]))

    l1a = nn.layers.dnn.Conv2DDNNLayer(l0_frames,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=16,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=16,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2, 2), stride=(2, 2))

    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=32,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=32,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2, 2), stride=(2, 2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2, 2), stride=(2, 2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2, 2), stride=(2, 2))

    l5a = nn.layers.dnn.Conv2DDNNLayer(l4,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5b = nn.layers.dnn.Conv2DDNNLayer(l5a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5c = nn.layers.dnn.Conv2DDNNLayer(l5b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2, 2), stride=(2, 2))

    l5drop = nn.layers.dropout(l5, p=0.5)
    ld1 = nn.layers.DenseLayer(l5drop,
                               num_units=512,
                               W=nn.init.Orthogonal("relu"),
                               b=nn.init.Constant(0.1),
                               nonlinearity=nn.nonlinearities.rectify)

    ld1drop = nn.layers.dropout(ld1, p=0.5)
    ld2 = nn.layers.DenseLayer(ld1drop,
                               num_units=512,
                               W=nn.init.Orthogonal("relu"),
                               b=nn.init.Constant(0.1),
                               nonlinearity=nn.nonlinearities.rectify)

    ld2drop = nn.layers.dropout(ld2, p=0.5)

    ld3mu = nn.layers.DenseLayer(ld2drop,
                                 num_units=1,
                                 W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(200.0),
                                 nonlinearity=None)
    ld3sigma = nn.layers.DenseLayer(ld2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(50.0),
                                    nonlinearity=lb_softplus(3))
    ld3musigma = nn.layers.ConcatLayer([ld3mu, ld3sigma], axis=1)

    # Attention model

    l_att_d1 = nn.layers.DenseLayer(l5,
                                    num_units=32,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(0.1),
                                    nonlinearity=nn.nonlinearities.rectify)
    l_att_d1re = nn.layers.ReshapeLayer(
        l_att_d1, (batch_size, nr_frames_subsampled * 32))
    l_att_d2 = nn.layers.DenseLayer(l_att_d1re,
                                    num_units=64,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(0.1),
                                    nonlinearity=nn.nonlinearities.rectify)
    l_att = nn.layers.DenseLayer(l_att_d2,
                                 num_units=nr_frames_subsampled,
                                 W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1),
                                 nonlinearity=None)
    l_att_sys = nn.layers.NonlinearityLayer(
        l_att, nonlinearity=nn.nonlinearities.softmax)
    l_att_dia = nn.layers.NonlinearityLayer(l_att,
                                            nonlinearity=reverse_softmax)

    # Reshape back to slicemodel
    ld3musigma_slices = nn.layers.ReshapeLayer(ld3musigma,
                                               (-1, nr_frames_subsampled, 2))

    l_slices_musigma_cm_sys = layers.SelectWithAttentionLayer(
        [ld3musigma_slices, l_att_sys])  # (bxs, 2)
    l_slices_musigma_cm_dia = layers.SelectWithAttentionLayer(
        [ld3musigma_slices, l_att_dia])  # (bxs, 2)

    l_systole = layers.MuSigmaErfLayer(l_slices_musigma_cm_sys)
    l_diastole = layers.MuSigmaErfLayer(l_slices_musigma_cm_dia)

    return {
        "inputs": {
            "sliced:data:singleslice": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
            ld1: l2_weight,
            ld2: l2_weight,
            ld3mu: l2_weight_out,
            ld3sigma: l2_weight_out,
        }
    }
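

# reverse_softmax and layers.SelectWithAttentionLayer are project helpers, not
# defined here. Sketches of the assumed behaviour only: reverse_softmax is a
# softmax over the negated scores, so the diastole attention concentrates on
# the frames the systole attention down-weights, and SelectWithAttentionLayer
# takes [(batch, time, 2) per-frame (mu, sigma), (batch, time) attention
# weights] and returns their attention-weighted average of shape (batch, 2).
import theano.tensor as T
import lasagne as nn


def reverse_softmax_sketch(x):
    return nn.nonlinearities.softmax(-x)


class SelectWithAttentionLayerSketch(nn.layers.MergeLayer):
    def get_output_shape_for(self, input_shapes):
        musigma_shape = input_shapes[0]
        return (musigma_shape[0], musigma_shape[2])

    def get_output_for(self, inputs, **kwargs):
        musigma, att = inputs
        return T.sum(musigma * att.dimshuffle(0, 1, 'x'), axis=1)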


# Example 13
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:sax"]
    input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
    input_size_locations = data_sizes["sliced:data:sax:locations"]

    l0 = nn.layers.InputLayer(input_size)
    lin_slice_mask = nn.layers.InputLayer(input_size_mask)
    lin_slice_locations = nn.layers.InputLayer(input_size_locations)

    # PREPROCESS SLICES SEPARATELY
    # Convolutional layers and some dense layers are defined in a submodel
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))

    l1a = nn.layers.dnn.Conv2DDNNLayer(l0_slices,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=64,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2, 2), stride=(2, 2))

    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=128,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2, 2), stride=(2, 2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=256,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2, 2), stride=(2, 2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2, 2), stride=(2, 2))

    l5a = nn.layers.dnn.Conv2DDNNLayer(l4,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5b = nn.layers.dnn.Conv2DDNNLayer(l5a,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5c = nn.layers.dnn.Conv2DDNNLayer(l5b,
                                       W=nn.init.Orthogonal("relu"),
                                       filter_size=(3, 3),
                                       num_filters=512,
                                       stride=(1, 1),
                                       pad="same",
                                       nonlinearity=nn.nonlinearities.rectify)
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2, 2), stride=(2, 2))

    # Systole Dense layers
    ldsys1 = nn.layers.DenseLayer(l5,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
    ldsys2 = nn.layers.DenseLayer(ldsys1drop,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    ldsys2drop = nn.layers.dropout(ldsys2, p=0.5)
    l_sys_mu = nn.layers.DenseLayer(ldsys2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(10.0),
                                    nonlinearity=None)
    l_sys_sigma = nn.layers.DenseLayer(ldsys2drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(3.),
                                       nonlinearity=lb_softplus(.1))

    # Diastole Dense layers
    lddia1 = nn.layers.DenseLayer(l5,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    lddia1drop = nn.layers.dropout(lddia1, p=0.5)
    lddia2 = nn.layers.DenseLayer(lddia1drop,
                                  num_units=512,
                                  W=nn.init.Orthogonal("relu"),
                                  b=nn.init.Constant(0.1),
                                  nonlinearity=nn.nonlinearities.rectify)

    lddia2drop = nn.layers.dropout(lddia2, p=0.5)
    l_dia_mu = nn.layers.DenseLayer(lddia2drop,
                                    num_units=1,
                                    W=nn.init.Orthogonal("relu"),
                                    b=nn.init.Constant(10.0),
                                    nonlinearity=None)
    l_dia_sigma = nn.layers.DenseLayer(lddia2drop,
                                       num_units=1,
                                       W=nn.init.Orthogonal("relu"),
                                       b=nn.init.Constant(3.),
                                       nonlinearity=lb_softplus(.1))

    # AGGREGATE SLICES PER PATIENT
    l_scaled_slice_locations = layers.TrainableScaleLayer(
        lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)

    # Systole
    l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
    l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
    l_pat_sys_aggr_mu_sigma = layers.JeroenLayer([
        l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask,
        l_scaled_slice_locations
    ],
                                                 rescale_input=1.)

    l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)

    # Diastole
    l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
    l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
    l_pat_dia_aggr_mu_sigma = layers.JeroenLayer([
        l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask,
        l_scaled_slice_locations
    ],
                                                 rescale_input=1.)

    l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)

    return {
        "inputs": {
            "sliced:data:sax": l0,
            "sliced:data:sax:is_not_padded": lin_slice_mask,
            "sliced:data:sax:locations": lin_slice_locations,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
            ldsys1: l2_weight,
            ldsys2: l2_weight,
            l_sys_mu: l2_weight_out,
            l_sys_sigma: l2_weight_out,
            lddia1: l2_weight,
            lddia2: l2_weight,
            l_dia_mu: l2_weight_out,
            l_dia_sigma: l2_weight_out,
        },
    }
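

# layers.JeroenLayer is the project's slice-to-volume aggregation layer, used
# above and in the ensembling model below. Rough sketch of the assumed
# behaviour only (not the original code): given per-slice cross-section areas
# mu/sigma, a padding mask and slice locations, it integrates the areas along
# the long axis (trapezoid rule between adjacent non-padded slices) to get a
# volume mean, combining the per-slice sigmas in quadrature.
import theano.tensor as T
import lasagne as nn


class JeroenLayerSketch(nn.layers.MergeLayer):
    def __init__(self, incomings, rescale_input=1., **kwargs):
        super(JeroenLayerSketch, self).__init__(incomings, **kwargs)
        self.rescale_input = rescale_input

    def get_output_shape_for(self, input_shapes):
        return (input_shapes[0][0], 2)

    def get_output_for(self, inputs, **kwargs):
        mu, sigma, mask, loc = inputs
        mu = mu * self.rescale_input * mask
        sigma = sigma * self.rescale_input * mask
        # Distance between adjacent slices; padded slices contribute nothing.
        spacing = abs(loc[:, 1:] - loc[:, :-1]) * mask[:, 1:] * mask[:, :-1]
        vol_mu = T.sum(0.5 * (mu[:, 1:] + mu[:, :-1]) * spacing, axis=1)
        vol_var = T.sum((0.5 * spacing) ** 2 *
                        (sigma[:, 1:] ** 2 + sigma[:, :-1] ** 2), axis=1)
        return T.stack([vol_mu, T.sqrt(vol_var)], axis=1)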


def build_model(input_layer=None):

    #################
    # Regular model #
    #################

    l_4ch = nn.layers.InputLayer(data_sizes["sliced:data:chanzoom:4ch"])
    l_2ch = nn.layers.InputLayer(data_sizes["sliced:data:chanzoom:2ch"])

    # Add an axis to concatenate over later
    l_4chr = nn.layers.ReshapeLayer(l_4ch, (batch_size, 1, ) + l_4ch.output_shape[1:])
    l_2chr = nn.layers.ReshapeLayer(l_2ch, (batch_size, 1, ) + l_2ch.output_shape[1:])
    
    # Cut the images in half, flip the left ones
    l_4ch_left = nn.layers.SliceLayer(l_4chr, indices=slice(image_size//2-1, None, -1), axis=-1)
    l_4ch_right = nn.layers.SliceLayer(l_4chr, indices=slice(image_size//2, None, 1), axis=-1)
    l_2ch_left = nn.layers.SliceLayer(l_2chr, indices=slice(image_size//2-1, None, -1), axis=-1)
    l_2ch_right = nn.layers.SliceLayer(l_2chr, indices=slice(image_size//2, None, 1), axis=-1)

    # Concatenate over second axis
    l_24lr = nn.layers.ConcatLayer([l_4ch_left, l_4ch_right, l_2ch_left, l_2ch_right], axis=1)
    # b, 4, t, h, w

    # Subsample frames
    SUBSAMPLING_FACTOR = 2
    nr_subsampled_frames = nr_frames // SUBSAMPLING_FACTOR
    l_24lr_ss = nn.layers.SliceLayer(l_24lr, indices=slice(None, None, SUBSAMPLING_FACTOR), axis=2)

    # Move frames and halves to batch, process them all in the same way, add channel axis
    l_halves = nn.layers.ReshapeLayer(l_24lr_ss, (batch_size * 4 * nr_subsampled_frames, 1, image_size, image_size//2))

    # First, do some convolutions in all directions
    num_channels = 64
    l1a = nn.layers.dnn.Conv2DDNNLayer(l_halves,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=num_channels, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=num_channels, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    
    # Then, put an rnn over the last axis
    l1_shuffle = nn.layers.DimshuffleLayer(l1b, pattern=(0, 2, 3, 1))
    l1_r = nn.layers.ReshapeLayer(l1_shuffle, (batch_size * 4 * nr_subsampled_frames * image_size, image_size//2, num_channels))

    l_rnn = rnn_layer(l1_r, 1024)

    ld3mu = nn.layers.DenseLayer(l_rnn, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(16.0), nonlinearity=None)
    ld3sigma = nn.layers.DenseLayer(l_rnn, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(4.0), nonlinearity=lb_softplus(.01))
    ld3musigma = nn.layers.ConcatLayer([ld3mu, ld3sigma], axis=1)

    # Get the four halves back
    l_24lr_musigma = nn.layers.ReshapeLayer(ld3musigma, (batch_size, 4, nr_subsampled_frames, image_size, 2))
    l_24lr_musigma_shuffle = nn.layers.DimshuffleLayer(l_24lr_musigma, pattern=(0, 2, 1, 3, 4))
    l_24lr_musigma_re = nn.layers.ReshapeLayer(l_24lr_musigma_shuffle, (batch_size * nr_subsampled_frames, 4, image_size, 2))

    l_4ch_left_musigma = nn.layers.SliceLayer(l_24lr_musigma_re, indices=0, axis=1)   
    l_4ch_right_musigma = nn.layers.SliceLayer(l_24lr_musigma_re, indices=1, axis=1)   
    l_2ch_left_musigma = nn.layers.SliceLayer(l_24lr_musigma_re, indices=2, axis=1)   
    l_2ch_right_musigma = nn.layers.SliceLayer(l_24lr_musigma_re, indices=3, axis=1)

    l_4ch_musigma = layers.SumGaussLayer([l_4ch_left_musigma, l_4ch_right_musigma])
    l_2ch_musigma = layers.SumGaussLayer([l_2ch_left_musigma, l_2ch_right_musigma])

    l_musigma_frames = layers.IraLayerNoTime(l_4ch_musigma, l_2ch_musigma)

    # Minmax over time
    print(l_musigma_frames.output_shape)
    l_musigmas = nn.layers.ReshapeLayer(l_musigma_frames, (-1, nr_subsampled_frames, 2))
    l_musigma_sys = layers.ArgmaxAndMaxLayer(l_musigmas, mode='min')
    l_musigma_dia = layers.ArgmaxAndMaxLayer(l_musigmas, mode='max')

    l_systole = layers.MuSigmaErfLayer(l_musigma_sys)
    l_diastole = layers.MuSigmaErfLayer(l_musigma_dia)
 
    return {
        "inputs":{
            "sliced:data:chanzoom:4ch": l_4ch,
            "sliced:data:chanzoom:2ch": l_2ch,
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
        },
        "meta_outputs": {
        }
    }
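

# layers.WeightedMeanLayer (used by the ensembling model below) is assumed to
# blend several cumulative-distribution predictions with weights produced by a
# small dense network: the weights are normalised to sum to one per patient
# and the CDFs are averaged with those weights. Sketch only, under that
# assumption.
import theano.tensor as T
import lasagne as nn


class WeightedMeanLayerSketch(nn.layers.MergeLayer):
    def __init__(self, weights_layer, distribution_layers, **kwargs):
        incomings = [weights_layer] + list(distribution_layers)
        super(WeightedMeanLayerSketch, self).__init__(incomings, **kwargs)

    def get_output_shape_for(self, input_shapes):
        return input_shapes[1]

    def get_output_for(self, inputs, **kwargs):
        weights, dists = inputs[0], inputs[1:]
        weights = weights / (T.sum(weights, axis=1, keepdims=True) + 1e-7)
        stacked = T.stack(dists, axis=1)   # (batch, n_models, nr_bins)
        return T.sum(stacked * weights.dimshuffle(0, 1, 'x'), axis=1)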


# Example 15
def build_model():

    import j6_2ch_gauss, j6_4ch_gauss
    meta_2ch = j6_2ch_gauss.build_model()
    meta_4ch = j6_4ch_gauss.build_model()

    l_meta_2ch_systole = nn.layers.DenseLayer(
        meta_2ch["meta_outputs"]["systole"],
        num_units=64,
        W=nn.init.Orthogonal(),
        b=nn.init.Constant(0.1),
        nonlinearity=nn.nonlinearities.rectify)
    l_meta_2ch_diastole = nn.layers.DenseLayer(
        meta_2ch["meta_outputs"]["diastole"],
        num_units=64,
        W=nn.init.Orthogonal(),
        b=nn.init.Constant(0.1),
        nonlinearity=nn.nonlinearities.rectify)

    l_meta_4ch_systole = nn.layers.DenseLayer(
        meta_4ch["meta_outputs"]["systole"],
        num_units=64,
        W=nn.init.Orthogonal(),
        b=nn.init.Constant(0.1),
        nonlinearity=nn.nonlinearities.rectify)
    l_meta_4ch_diastole = nn.layers.DenseLayer(
        meta_4ch["meta_outputs"]["diastole"],
        num_units=64,
        W=nn.init.Orthogonal(),
        b=nn.init.Constant(0.1),
        nonlinearity=nn.nonlinearities.rectify)

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:sax"]
    input_size_mask = data_sizes["sliced:data:sax:is_not_padded"]
    input_size_locations = data_sizes["sliced:data:sax:locations"]

    l0 = nn.layers.InputLayer(input_size)
    lin_slice_mask = nn.layers.InputLayer(input_size_mask)
    lin_slice_locations = nn.layers.InputLayer(input_size_locations)

    # PREPROCESS SLICES SEPARATELY
    # Convolutional layers and some dense layers are defined in a submodel
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))

    import je_ss_jonisc64small_360_gauss_longer
    submodel = je_ss_jonisc64small_360_gauss_longer.build_model(l0_slices)

    # Systole Dense layers
    l_sys_mu = submodel["meta_outputs"]["systole:mu"]
    l_sys_sigma = submodel["meta_outputs"]["systole:sigma"]
    l_sys_meta = submodel["meta_outputs"]["systole"]
    # Diastole Dense layers
    l_dia_mu = submodel["meta_outputs"]["diastole:mu"]
    l_dia_sigma = submodel["meta_outputs"]["diastole:sigma"]
    l_dia_meta = submodel["meta_outputs"]["diastole"]

    # AGGREGATE SLICES PER PATIENT
    l_scaled_slice_locations = layers.TrainableScaleLayer(
        lin_slice_locations, scale=nn.init.Constant(0.1), trainable=False)

    # Systole
    l_pat_sys_ss_mu = nn.layers.ReshapeLayer(l_sys_mu, (-1, nr_slices))
    l_pat_sys_ss_sigma = nn.layers.ReshapeLayer(l_sys_sigma, (-1, nr_slices))
    # JeroenLayer is defined elsewhere; given its inputs it presumably combines the
    # per-slice (mu, sigma) estimates over the slice locations, using the mask to
    # ignore padded slices, into a single per-patient (mu, sigma) volume estimate.
    l_pat_sys_aggr_mu_sigma = layers.JeroenLayer(
        [l_pat_sys_ss_mu, l_pat_sys_ss_sigma, lin_slice_mask, l_scaled_slice_locations],
        rescale_input=100.)

    l_systole = layers.MuSigmaErfLayer(l_pat_sys_aggr_mu_sigma)

    l_sys_meta = nn.layers.DenseLayer(nn.layers.ReshapeLayer(
        l_sys_meta, (-1, nr_slices, 512)),
                                      num_units=64,
                                      W=nn.init.Orthogonal(),
                                      b=nn.init.Constant(0.1),
                                      nonlinearity=nn.nonlinearities.rectify)

    l_meta_systole = nn.layers.ConcatLayer(
        [l_meta_2ch_systole, l_meta_4ch_systole, l_sys_meta])
    l_weights = nn.layers.DenseLayer(l_meta_systole,
                                     num_units=512,
                                     W=nn.init.Orthogonal(),
                                     b=nn.init.Constant(0.1),
                                     nonlinearity=nn.nonlinearities.rectify)
    l_weights = nn.layers.DenseLayer(l_weights,
                                     num_units=3,
                                     W=nn.init.Orthogonal(),
                                     b=nn.init.Constant(0.1),
                                     nonlinearity=nn.nonlinearities.rectify)
    systole_output = layers.WeightedMeanLayer(l_weights, [
        l_systole, meta_2ch["outputs"]["systole"],
        meta_4ch["outputs"]["systole"]
    ])

    # Diastole
    l_pat_dia_ss_mu = nn.layers.ReshapeLayer(l_dia_mu, (-1, nr_slices))
    l_pat_dia_ss_sigma = nn.layers.ReshapeLayer(l_dia_sigma, (-1, nr_slices))
    l_pat_dia_aggr_mu_sigma = layers.JeroenLayer(
        [l_pat_dia_ss_mu, l_pat_dia_ss_sigma, lin_slice_mask, l_scaled_slice_locations],
        rescale_input=100.)

    l_diastole = layers.MuSigmaErfLayer(l_pat_dia_aggr_mu_sigma)

    l_dia_meta = nn.layers.DenseLayer(nn.layers.ReshapeLayer(
        l_dia_meta, (-1, nr_slices, 512)),
                                      num_units=64,
                                      W=nn.init.Orthogonal(),
                                      b=nn.init.Constant(0.1),
                                      nonlinearity=nn.nonlinearities.rectify)

    l_meta_diastole = nn.layers.ConcatLayer(
        [l_meta_2ch_diastole, l_meta_4ch_diastole, l_dia_meta])
    l_weights = nn.layers.DenseLayer(l_meta_diastole,
                                     num_units=512,
                                     W=nn.init.Orthogonal(),
                                     b=nn.init.Constant(0.1),
                                     nonlinearity=nn.nonlinearities.rectify)
    l_weights = nn.layers.DenseLayer(l_weights,
                                     num_units=3,
                                     W=nn.init.Orthogonal(),
                                     b=nn.init.Constant(0.1),
                                     nonlinearity=nn.nonlinearities.identity)
    diastole_output = layers.WeightedMeanLayer(l_weights, [
        l_diastole, meta_2ch["outputs"]["diastole"],
        meta_4ch["outputs"]["diastole"]
    ])

    submodels = [submodel, meta_2ch, meta_4ch]

    return {
        "inputs":
        dict(
            {
                "sliced:data:sax": l0,
                "sliced:data:sax:is_not_padded": lin_slice_mask,
                "sliced:data:sax:locations": lin_slice_locations,
            }, **{
                k: v
                for d in [model["inputs"] for model in [meta_2ch, meta_4ch]]
                for k, v in d.items()
            }),
        "outputs": {
            "systole": systole_output,
            "diastole": diastole_output,
        },
        "regularizable":
        dict({}, **{
            k: v
            for d in [
                model["regularizable"] for model in submodels
                if "regularizable" in model
            ] for k, v in d.items()
        }),
        "pretrained": {
            je_ss_jonisc64small_360_gauss_longer.__name__: submodel["outputs"],
            j6_2ch_gauss.__name__: meta_2ch["outputs"],
            j6_4ch_gauss.__name__: meta_4ch["outputs"],
        },
        #"cutoff_gradients": [
        #] + [ v for d in [model["meta_outputs"] for model in [meta_2ch, meta_4ch] if "meta_outputs" in model]
        #       for v in d.values() ]
    }
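The final systole and diastole outputs of this ensemble come from layers.WeightedMeanLayer, which is also not defined in this listing. A hedged sketch of how such a merge layer could work, assuming it softmaxes the learned weights and takes a convex combination of the three (batch, 600) CDF predictions; the class name WeightedMeanLayerSketch is an assumption:

import theano.tensor as T
import lasagne as nn

class WeightedMeanLayerSketch(nn.layers.MergeLayer):
    """Convex combination of several predictions, weighted per example."""

    def __init__(self, weight_layer, prediction_layers, **kwargs):
        super(WeightedMeanLayerSketch, self).__init__(
            [weight_layer] + list(prediction_layers), **kwargs)

    def get_output_shape_for(self, input_shapes):
        return input_shapes[1]  # e.g. (batch, 600), same as each prediction

    def get_output_for(self, inputs, **kwargs):
        weights, predictions = inputs[0], inputs[1:]
        w = T.nnet.softmax(weights)               # (batch, n_experts), rows sum to 1
        stacked = T.stack(predictions, axis=1)    # (batch, n_experts, 600)
        return T.sum(w.dimshuffle(0, 1, 'x') * stacked, axis=1)

Under this assumption the calls above, e.g. layers.WeightedMeanLayer(l_weights, [l_systole, meta_2ch["outputs"]["systole"], meta_4ch["outputs"]["systole"]]), blend the short-axis model with the 2ch and 4ch sub-models using per-patient weights.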
Example 16
def build_model():

    #################
    # Regular model #
    #################
    input_size = data_sizes["sliced:data:randomslices"]

    l0 = nn.layers.InputLayer(input_size)

    # PREPROCESS SLICES SEPARATELY
    # Convolutional layers
    l0_slices = nn.layers.ReshapeLayer(l0, (-1, [2], [3], [4]))
    l1a = nn.layers.dnn.Conv2DDNNLayer(l0_slices,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1b = nn.layers.dnn.Conv2DDNNLayer(l1a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=64, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l1 = nn.layers.dnn.MaxPool2DDNNLayer(l1b, pool_size=(2,2), stride=(2,2))

    l2a = nn.layers.dnn.Conv2DDNNLayer(l1,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2b = nn.layers.dnn.Conv2DDNNLayer(l2a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=128, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l2 = nn.layers.dnn.MaxPool2DDNNLayer(l2b, pool_size=(2,2), stride=(2,2))

    l3a = nn.layers.dnn.Conv2DDNNLayer(l2,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3b = nn.layers.dnn.Conv2DDNNLayer(l3a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3c = nn.layers.dnn.Conv2DDNNLayer(l3b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=256, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l3 = nn.layers.dnn.MaxPool2DDNNLayer(l3c, pool_size=(2,2), stride=(2,2))

    l4a = nn.layers.dnn.Conv2DDNNLayer(l3,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4b = nn.layers.dnn.Conv2DDNNLayer(l4a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4c = nn.layers.dnn.Conv2DDNNLayer(l4b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l4 = nn.layers.dnn.MaxPool2DDNNLayer(l4c, pool_size=(2,2), stride=(2,2))

    l5a = nn.layers.dnn.Conv2DDNNLayer(l4,  W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5b = nn.layers.dnn.Conv2DDNNLayer(l5a, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5c = nn.layers.dnn.Conv2DDNNLayer(l5b, W=nn.init.Orthogonal("relu"), filter_size=(3,3), num_filters=512, stride=(1,1), pad="same", nonlinearity=nn.nonlinearities.rectify)
    l5 = nn.layers.dnn.MaxPool2DDNNLayer(l5c, pool_size=(2,2), stride=(2,2))

    # Systole Dense layers
    ldsys1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)

    ldsys1drop = nn.layers.dropout(ldsys1, p=0.5)
    ldsys2 = nn.layers.DenseLayer(ldsys1drop, num_units=256, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)

    # Diastole Dense layers
    lddia1 = nn.layers.DenseLayer(l5, num_units=512, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)

    lddia1drop = nn.layers.dropout(lddia1, p=0.5)
    lddia2 = nn.layers.DenseLayer(lddia1drop, num_units=256, W=nn.init.Orthogonal("relu"),b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.rectify)

    # AGGREGATE SLICES PER PATIENT
    # Systole
    ldsys_pat_in = nn.layers.ReshapeLayer(ldsys2, (-1, nr_slices, [1]))

    # LSTM gates for aggregating slice features; the forget-gate bias of 5.0 starts the
    # gate near 1 (sigmoid(5) ~ 0.99), so the cell state is retained across slices early in training.
    input_gate_sys = nn.layers.Gate(W_in=nn.init.GlorotUniform(), W_hid=nn.init.Orthogonal())
    forget_gate_sys = nn.layers.Gate(W_in=nn.init.GlorotUniform(), W_hid=nn.init.Orthogonal(), b=nn.init.Constant(5.0))
    output_gate_sys = nn.layers.Gate(W_in=nn.init.GlorotUniform(), W_hid=nn.init.Orthogonal())
    cell_sys = nn.layers.Gate(W_in=nn.init.GlorotUniform(), W_hid=nn.init.Orthogonal(), W_cell=None, nonlinearity=nn.nonlinearities.tanh)

    ldsys_lstm = nn.layers.LSTMLayer(ldsys_pat_in, num_units=256,
                                     ingate=input_gate_sys, forgetgate=forget_gate_sys,
                                     cell=cell_sys, outgate=output_gate_sys,
                                     peepholes=False, precompute_input=False,
                                     grad_clipping=5, only_return_final=True,
                                     learn_init=True,)
 
    ldsys_lstm_drop = nn.layers.dropout(ldsys_lstm, p=0.5)

    ldsys3mu = nn.layers.DenseLayer(ldsys_lstm_drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(200.0), nonlinearity=None)
    ldsys3sigma = nn.layers.DenseLayer(ldsys_lstm_drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(100.0), nonlinearity=lb_softplus(3))
    ldsys3musigma = nn.layers.ConcatLayer([ldsys3mu, ldsys3sigma], axis=1)

    l_systole = layers.MuSigmaErfLayer(ldsys3musigma)

    # Diastole
    lddia_pat_in = nn.layers.ReshapeLayer(lddia2, (-1, nr_slices, [1]))

    input_gate_dia = nn.layers.Gate(W_in=nn.init.GlorotUniform(), W_hid=nn.init.Orthogonal())
    forget_gate_dia = nn.layers.Gate(W_in=nn.init.GlorotUniform(), W_hid=nn.init.Orthogonal(), b=nn.init.Constant(5.0))
    output_gate_dia = nn.layers.Gate(W_in=nn.init.GlorotUniform(), W_hid=nn.init.Orthogonal())
    cell_dia = nn.layers.Gate(W_in=nn.init.GlorotUniform(), W_hid=nn.init.Orthogonal(), W_cell=None, nonlinearity=nn.nonlinearities.tanh)

    lddia_lstm = nn.layers.LSTMLayer(lddia_pat_in, num_units=256,
                                     ingate=input_gate_dia, forgetgate=forget_gate_dia,
                                     cell=cell_dia, outgate=output_gate_dia,
                                     peepholes=False, precompute_input=False,
                                     grad_clipping=5, only_return_final=True,
                                     learn_init=True,)
 
    lddia_lstm_drop = nn.layers.dropout(lddia_lstm, p=0.5)

    lddia3mu = nn.layers.DenseLayer(lddia_lstm_drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(200.0), nonlinearity=None)
    lddia3sigma = nn.layers.DenseLayer(lddia_lstm_drop, num_units=1, W=nn.init.Orthogonal("relu"), b=nn.init.Constant(100.0), nonlinearity=lb_softplus(3))
    lddia3musigma = nn.layers.ConcatLayer([lddia3mu, lddia3sigma], axis=1)

    l_diastole = layers.MuSigmaErfLayer(lddia3musigma)

    return {
        "inputs":{
            "sliced:data:randomslices": l0
        },
        "outputs": {
            "systole": l_systole,
            "diastole": l_diastole,
        },
        "regularizable": {
            ldsys1: l2_weight,
            ldsys2: l2_weight,
            ldsys3mu: l2_weight_out,
            ldsys3sigma: l2_weight_out,
            lddia1: l2_weight,
            lddia2: l2_weight,
            lddia3mu: l2_weight_out,
            lddia3sigma: l2_weight_out,
        },
    }
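The sigma heads throughout these models use lb_softplus, which is not defined in this listing. A minimal sketch of what it likely looks like, assuming it is a softplus nonlinearity shifted by a lower bound so the predicted standard deviation stays strictly above that bound; the exact definition is an assumption:

import lasagne as nn

def lb_softplus(lb=1.0):
    """Return a nonlinearity computing softplus(x) + lb, i.e. bounded below by lb."""
    def nonlinearity(x):
        # softplus(x) = log(1 + exp(x)) > 0, so the output is always > lb
        return nn.nonlinearities.softplus(x) + lb
    return nonlinearity

With lb_softplus(3), for example, the sigma outputs of ldsys3sigma and lddia3sigma can never collapse below 3, which keeps the erf-based CDF well behaved early in training.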