# Imports assumed by these snippets (tf.keras here; adjust if the project
# uses standalone Keras). get_activation, get_initializer, get_octants,
# get_quadrants, InputPreprocessChannel, EuclidianDistanceChannel and
# ChannelCrossing are project-local helpers, not part of Keras; hedged
# sketches of get_octants and get_quadrants appear further below.
from functools import reduce
from operator import mul
from typing import Dict, Tuple

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Add, Concatenate, Dense, Input, Lambda,
                                     Reshape)
from tensorflow.keras.models import Model


def make_symmetrical_block(feature_shape, params):
    activation = get_activation(params["activation"])
    bias_initializer = get_initializer(params["bias_initializer"])
    kernel_initializer = get_initializer(params["kernel_initializer"])

    input_ = Input(shape=feature_shape)

    feature_size = reduce(mul, feature_shape)
    layer = Reshape([feature_size])(input_)

    layer = Dense(
        feature_size,
        activation=activation,
        use_bias=True,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(layer)

    # "additional_dense_layer" may be a bool (one extra layer) or an int
    # (that many extra layers); int(False) == 0, int(True) == 1
    for _ in range(int(params.get("additional_dense_layer", False))):
        layer = Dense(
            feature_size,
            activation=activation,
            use_bias=True,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(layer)

    return Model(inputs=input_, outputs=layer)
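
# A minimal usage sketch; the params values below are hypothetical and assume
# get_activation / get_initializer accept standard Keras identifiers.
params = {
    "activation": "relu",
    "bias_initializer": "zeros",
    "kernel_initializer": "glorot_uniform",
    "additional_dense_layer": True,  # one extra Dense layer
}
block = make_symmetrical_block((4, 4, 4, 2), params)
block.summary()  # (None, 4, 4, 4, 2) -> (None, 128)
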
def make_one_level_model(feature_shape: Tuple, params: Dict):
    activation = get_activation(params["activation"])
    bias_initializer = get_initializer(params["bias_initializer"])
    kernel_initializer = get_initializer(params["kernel_initializer"])

    # ceil(dim / 2) along each spatial axis; with odd dims the two halves
    # share the centre plane
    z_half_dim = feature_shape[0] // 2 + feature_shape[0] % 2
    y_half_dim = feature_shape[1] // 2 + feature_shape[1] % 2
    x_half_dim = feature_shape[2] // 2 + feature_shape[2] % 2
    symmetrical_block_shape = (z_half_dim, y_half_dim, x_half_dim,
                               feature_shape[3])
    block_dimension = reduce(mul, symmetrical_block_shape) * 8  # 8 octants

    inputs = [
        Input(shape=[block_dimension]),
        Input(shape=feature_shape),
    ]
    prev_block, new_block = inputs

    # one symmetrical block shared across all eight octants (weight sharing)
    sym_block = make_symmetrical_block(symmetrical_block_shape, params)
    new_block = Concatenate(axis=1)(
        [sym_block(octant) for octant in get_octants(new_block)])

    # project the new block; no bias on this path
    dense_in = Dense(
        block_dimension,
        activation=None,
        use_bias=False,
        kernel_initializer=kernel_initializer,
    )(new_block)

    layer = Dense(
        block_dimension,
        activation=None,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(prev_block)

    # fuse the projected new block with the transformed previous block
    layer = Add()([dense_in, layer])
    layer = activation(layer)

    layer = Dense(
        block_dimension,
        activation=None,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(layer)

    layer = Add()([layer, prev_block])  # residual connection
    layer = activation(layer)

    return Model(inputs=inputs, outputs=layer)
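
# get_octants is a project-local helper that is not shown in these snippets.
# A plausible sketch (an assumption, not the original implementation): split
# the (batch, z, y, x, c) patch into its eight spatial octants and mirror
# each one into a canonical orientation, so the shared sym_block sees every
# octant the same way.
def get_octants(tensor):
    _, z, y, x, _ = tensor.shape
    zh = z // 2 + z % 2  # ceil-half sizes, matching the *_half_dim values
    yh = y // 2 + y % 2
    xh = x // 2 + x % 2
    octants = []
    for zf in (False, True):
        for yf in (False, True):
            for xf in (False, True):
                def take(t, zf=zf, yf=yf, xf=xf):
                    sz = slice(z - zh, z) if zf else slice(0, zh)
                    sy = slice(y - yh, y) if yf else slice(0, yh)
                    sx = slice(x - xh, x) if xf else slice(0, xh)
                    part = t[:, sz, sy, sx, :]
                    # mirror flipped axes back to the canonical orientation
                    axes = [a for a, f in ((1, zf), (2, yf), (3, xf)) if f]
                    return tf.reverse(part, axis=axes) if axes else part
                octants.append(Lambda(take)(tensor))
    return octants
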
def make_symmetrical_end_block(feature_size, params):
    activation = get_activation(params["activation"])
    bias_initializer = get_initializer(params["bias_initializer"])
    kernel_initializer = get_initializer(params["kernel_initializer"])

    input_ = Input(shape=(feature_size, ))
    layer = input_

    for _ in range(3):
        layer = Dense(
            feature_size,
            activation=activation,
            use_bias=True,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(layer)

    return Model(inputs=input_, outputs=layer)
Example #4
def make_one_level_model(feature_shape: Tuple, params: Dict):
    feature_size = reduce(mul, feature_shape)

    activation = get_activation(params["activation"])
    bias_initializer = get_initializer(params["bias_initializer"])
    kernel_initializer = get_initializer(params["kernel_initializer"])

    inputs = [
        Input(shape=[feature_size]),
        Input(shape=[feature_size]),
    ]
    prev_block, new_block = inputs

    # project the new level's features; no bias on this path
    dense_in = Dense(
        feature_size,
        activation=None,
        use_bias=False,
        kernel_initializer=kernel_initializer,
    )(new_block)

    layer = Dense(
        feature_size,
        activation=None,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(prev_block)

    layer = Add()([dense_in, layer])
    layer = activation(layer)

    layer = Dense(
        feature_size,
        activation=None,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(layer)

    layer = Add()([layer, prev_block])  # residual connection
    layer = activation(layer)

    return Model(inputs=inputs, outputs=layer)
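
# A minimal wiring sketch for this flat variant (all names and values below
# are hypothetical): the same level block is applied at every scale, with
# each level's output feeding the next level's prev_block input.
params = {
    "activation": "relu",            # assuming get_activation accepts
    "bias_initializer": "zeros",     # standard Keras identifiers
    "kernel_initializer": "glorot_uniform",
}
feature_shape = (4, 4, 4, 2)
feature_size = reduce(mul, feature_shape)
level_block = make_one_level_model(feature_shape, params)

state = Input(shape=[feature_size])
level_inputs = [Input(shape=[feature_size]) for _ in range(3)]
out = state
for level in level_inputs:
    out = level_block([out, level])  # weights shared across all levels
chain = Model(inputs=[state] + level_inputs, outputs=out)
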
Example #5
def make_model(params: Dict):
    feature_shape = tuple(params["feature_shape"])
    levels = params["scale_levels"]

    activation = get_activation(params["activation"])
    bias_initializer = get_initializer(params["bias_initializer"])
    kernel_initializer = get_initializer(params["kernel_initializer"])

    input_ = Input(shape=(len(levels), ) + feature_shape)
    combined_input = input_

    if params.get("preprocess_channel", {}).get("enabled", False):
        conf = params.get("preprocess_channel", {})
        input_preprocess = InputPreprocessChannel(
            polynom2_initializers=conf.get("polynom2_initializers",
                                           [0.00, -0.1]),
            polynom1_initializers=conf.get("polynom1_initializers",
                                           [0.0, 2.05]),
            bias_initializers=conf.get("bias_initializers", [0.47, -0.42]),
            mask_channel=feature_shape[-1] == 3,
        )
        combined_input = input_preprocess(combined_input)
        feature_shape = input_preprocess.compute_output_shape(feature_shape)

    input_levels = [
        # bind i via a default argument so each Lambda keeps its own index
        Lambda(lambda x, i=i: x[:, i, :, :, :, :])(combined_input)
        for i in range(len(levels))
    ]
    feature_size = reduce(mul, feature_shape)

    level_block = make_one_level_model(feature_shape, params)

    blocks = []
    for idx, scale_level in enumerate(levels):
        if len(blocks) == 0:
            # first level: seed the chain with a zero "previous block"
            zero_layer = Lambda(lambda x: K.zeros_like(x))(input_levels[idx])
            prev_block = Reshape([feature_size])(zero_layer)
        else:
            prev_block = blocks[-1]

        assert scale_level[0] == scale_level[1], "scale levels must be square"
        # scale the first two channels by the level's size relative to the
        # base 64 scale; remaining channels pass through unchanged
        multiplier = scale_level[0] / 64
        multiplier = [multiplier, multiplier] + [1] * (
            input_levels[idx].shape[-1] - 2)
        new_level = Lambda(lambda x, m=multiplier: x * m,
                           name="input_density_mult_{}".format(idx))(
                               input_levels[idx])

        new_level = Reshape([feature_size])(new_level)

        layer = level_block([prev_block, new_level])
        blocks.append(layer)

    prev_layer = blocks[-1]

    # three Dense layers; the last narrows to a single output value
    for i in range(3):
        output_size = 1 if i == 2 else feature_size
        prev_layer = Dense(
            output_size,
            activation=activation,
            use_bias=True,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(prev_layer)

    sum_layer = Lambda(lambda x: K.sum(x, axis=1, keepdims=True),
                       name="FinalOutputLayer")(prev_layer)

    return Model(inputs=input_, outputs=sum_layer)
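
# A hypothetical end-to-end configuration for this variant; key names follow
# the lookups above, values are illustrative only.
params = {
    "feature_shape": [4, 4, 4, 2],
    "scale_levels": [[64, 64], [128, 128]],  # must be square per the assert
    "activation": "relu",
    "bias_initializer": "zeros",
    "kernel_initializer": "glorot_uniform",
}
model = make_model(params)
model.summary()  # input (None, 2, 4, 4, 4, 2) -> output (None, 1)
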
Example #6
def make_model(params: Dict):
    feature_shape = tuple(params["feature_shape"])
    levels = len(params["scale_levels"])

    activation = get_activation(params["activation"])
    bias_initializer = get_initializer(params["bias_initializer"])
    kernel_initializer = get_initializer(params["kernel_initializer"])

    input_ = Input(shape=(levels, ) + feature_shape)
    combined_input = input_

    if params.get("preprocess_channel", {}).get("enabled", False):
        conf = params.get("preprocess_channel", {})
        input_preprocess = InputPreprocessChannel(
            polynom2_initializers=conf.get("polynom2_initializers",
                                           [0.00, -0.1]),
            polynom1_initializers=conf.get("polynom1_initializers",
                                           [0.0, 2.05]),
            bias_initializers=conf.get("bias_initializers", [0.47, -0.42]),
            mask_channel=feature_shape[-1] == 3,
        )
        combined_input = input_preprocess(combined_input)
        feature_shape = input_preprocess.compute_output_shape(feature_shape)

    input_levels = [
        Lambda(lambda x, i=i: x[:, i, :, :, :, :])(combined_input)
        for i in range(levels)
    ]

    # this variant splits only along y and x (quadrants); z stays whole
    y_half_dim = feature_shape[1] // 2 + feature_shape[1] % 2
    x_half_dim = feature_shape[2] // 2 + feature_shape[2] % 2
    symmetrical_block_shape = (feature_shape[0], y_half_dim, x_half_dim,
                               feature_shape[3])

    # one block shared across the four quadrants of the first level
    sym_block = make_symmetrical_block(symmetrical_block_shape, params)
    start_block = Concatenate(axis=1)(
        [sym_block(q) for q in get_quadrants(input_levels[0])])

    block_dimension = int(start_block.shape[1])

    blocks = [start_block]
    for i in range(1, levels):
        # a fresh block per scale level; weights are shared only across the
        # four quadrants within a level
        sym_block = make_symmetrical_block(symmetrical_block_shape, params)
        dense_in = Concatenate(axis=1)(
            [sym_block(q) for q in get_quadrants(input_levels[i])])

        layer = Dense(
            block_dimension,
            activation=None,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(blocks[i - 1])
        layer = Add()([dense_in, layer])
        layer = activation(layer)

        layer = Dense(
            block_dimension,
            activation=None,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(layer)
        layer = Add()([layer, blocks[i - 1]])  # residual connection
        layer = activation(layer)

        blocks.append(layer)

    prev_layer = blocks[-1]

    quadrant_size = block_dimension // 4  # four equal quadrant slices
    sym_end_block = make_symmetrical_end_block(quadrant_size, params)
    quadrants = []
    for idx in range(4):
        # bind idx via a default argument so each slice keeps its own index
        quadrant = Lambda(lambda x, idx=idx: x[:, quadrant_size * idx:
                                               quadrant_size * (idx + 1)])(
                                                   prev_layer)
        quadrants.append(sym_end_block(quadrant))

    quadrants = Concatenate(axis=1)(quadrants)

    sum_layer = Lambda(lambda x: K.sum(x, axis=1, keepdims=True),
                       name="FinalOutputLayer")(quadrants)

    return Model(inputs=input_, outputs=sum_layer)
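
# get_quadrants is the two-dimensional analogue of the get_octants sketch
# above (again an assumption, not the original implementation): split along
# y and x only, mirroring each quadrant into a canonical orientation so one
# shared sym_block can serve all four.
def get_quadrants(tensor):
    _, _, y, x, _ = tensor.shape
    yh = y // 2 + y % 2
    xh = x // 2 + x % 2
    quadrants = []
    for yf in (False, True):
        for xf in (False, True):
            def take(t, yf=yf, xf=xf):
                sy = slice(y - yh, y) if yf else slice(0, yh)
                sx = slice(x - xh, x) if xf else slice(0, xh)
                part = t[:, :, sy, sx, :]
                axes = [a for a, f in ((2, yf), (3, xf)) if f]
                return tf.reverse(part, axis=axes) if axes else part
            quadrants.append(Lambda(take)(tensor))
    return quadrants
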
Example #7
def make_model(params: Dict):
    feature_shape = tuple(params["feature_shape"])
    levels = len(params["scale_levels"])

    activation = get_activation(params["activation"])
    bias_initializer = get_initializer(params["bias_initializer"])
    kernel_initializer = get_initializer(params["kernel_initializer"])

    input_ = Input(shape=(levels, ) + feature_shape)
    combined_input = input_

    if params.get("preprocess_channel", {}).get("enabled", False):
        conf = params.get("preprocess_channel", {})
        input_preprocess = InputPreprocessChannel(
            polynom2_initializers=conf.get("polynom2_initializers",
                                           [0.00, -0.1]),
            polynom1_initializers=conf.get("polynom1_initializers",
                                           [0.0, 2.05]),
            bias_initializers=conf.get("bias_initializers", [0.47, -0.42]),
            mask_channel=feature_shape[-1] == 3,
        )
        combined_input = input_preprocess(combined_input)
        feature_shape = input_preprocess.compute_output_shape(feature_shape)

    if params.get("euclidian_distance_channel", {}).get("enabled", False):
        conf = params.get("euclidian_distance_channel", {})
        distance_channel = EuclidianDistanceChannel(
            scale_levels=params["scale_levels"],
            patch_size=params["patch_size"],
            voxel_size=params["voxel_size"],
            add_voxel_volume_channel=conf.get("add_voxel_volume_channel",
                                              False),
        )
        combined_input = distance_channel(combined_input)
        feature_shape = distance_channel.compute_output_shape(feature_shape)

    if params.get("channel_crossing", {}).get("enabled", False):
        conf = params.get("preprocess_channel", {})
        channel_crossing = ChannelCrossing(
            conf.get("output_channels", feature_shape[-1]))
        combined_input = channel_crossing(combined_input)
        feature_shape = channel_crossing.compute_output_shape(feature_shape)

    input_levels = [
        Lambda(lambda x, i=i: x[:, i, :, :, :, :])(combined_input)
        for i in range(levels)
    ]

    z_half_dim = feature_shape[0] // 2 + feature_shape[0] % 2
    y_half_dim = feature_shape[1] // 2 + feature_shape[1] % 2
    x_half_dim = feature_shape[2] // 2 + feature_shape[2] % 2
    symmetrical_block_shape = (z_half_dim, y_half_dim, x_half_dim,
                               feature_shape[3])

    # one block shared across the eight octants of the first level
    sym_block = make_symmetrical_block(symmetrical_block_shape, params)
    start_block = Concatenate(axis=1)(
        [sym_block(o) for o in get_octants(input_levels[0])])

    block_dimension = int(start_block.shape[1])

    blocks = [start_block]
    for i in range(1, levels):
        # a fresh block per scale level; weights are shared only across the
        # eight octants within a level
        sym_block = make_symmetrical_block(symmetrical_block_shape, params)
        dense_in = Concatenate(axis=1)(
            [sym_block(o) for o in get_octants(input_levels[i])])

        layer = Dense(
            block_dimension,
            activation=None,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(blocks[i - 1])
        layer = Add()([dense_in, layer])
        layer = activation(layer)

        layer = Dense(
            block_dimension,
            activation=None,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(layer)
        layer = Add()([layer, blocks[i - 1]])  # residual connection
        layer = activation(layer)

        blocks.append(layer)

    prev_layer = blocks[-1]

    octant_size = block_dimension // 8  # eight equal octant slices
    sym_end_block = make_symmetrical_end_block(octant_size, params)
    octants = []
    for idx in range(8):
        # bind idx via a default argument so each slice keeps its own index
        octant = Lambda(lambda x, idx=idx: x[:, octant_size * idx:
                                             octant_size * (idx + 1)])(
                                                 prev_layer)
        octants.append(sym_end_block(octant))

    octants = Concatenate(axis=1)(octants)

    sum_layer = Lambda(lambda x: K.sum(x, axis=1, keepdims=True),
                       name="FinalOutputLayer")(octants)

    return Model(inputs=input_, outputs=sum_layer)
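
# The three optional input transforms are config-driven. A hypothetical
# params fragment enabling all of them (values illustrative; the distance
# channel additionally requires top-level scale_levels, patch_size and
# voxel_size entries):
params_channels = {
    "preprocess_channel": {"enabled": True},  # polynomial defaults used
    "euclidian_distance_channel": {
        "enabled": True,
        "add_voxel_volume_channel": False,
    },
    "channel_crossing": {"enabled": True, "output_channels": 4},
}
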
Example #8
def make_model(params: Dict):
    feature_shape = tuple(params["feature_shape"])
    levels = len(params["scale_levels"])

    feature_size = reduce(mul, feature_shape)

    activation = get_activation(params["activation"])
    bias_initializer = get_initializer(params["bias_initializer"])
    kernel_initializer = get_initializer(params["kernel_initializer"])

    inputs = [Input(shape=(reduce(mul, feature_shape), ))]
    start_block = inputs[0]

    # optional input preprocess channel
    if params.get("preprocess_channel", {}).get("enabled", False):
        conf = params.get("preprocess_channel", {})
        input_preprocess = InputPreprocessChannel(
            polynom2_initializers=conf.get("polynom2_initializers",
                                           [0.00, -0.1]),
            polynom1_initializers=conf.get("polynom1_initializers",
                                           [0.0, 2.05]),
            bias_initializers=conf.get("bias_initializers", [0.47, -0.42]),
            mask_channel=feature_shape[-1] == 3,
        )
        start_block = input_preprocess(start_block)
        new_feature_shape = input_preprocess.compute_output_shape(
            feature_shape)
        feature_size = reduce(mul, new_feature_shape)

    start_block = Dense(
        feature_size,
        activation=activation,
        use_bias=True,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(start_block)
    start_block = Dense(
        feature_size,
        activation=activation,
        use_bias=True,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
    )(start_block)

    blocks = [start_block]
    for i in range(1, levels):
        # inputs always use the raw feature size; feature_size may have been
        # changed by the preprocess channel above
        inputs.append(Input(shape=(reduce(mul, feature_shape), )))
        dense_in = inputs[i]
        if params.get("preprocess_channel", {}).get("enabled", False):
            dense_in = input_preprocess(dense_in)
        dense_in = Dense(
            feature_size,
            activation=None,
            use_bias=False,  # no bias on the new-level path
            kernel_initializer=kernel_initializer,
        )(dense_in)

        layer = Dense(
            feature_size,
            activation=None,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(blocks[i - 1])
        layer = Add()([dense_in, layer])
        layer = activation(layer)

        layer = Dense(
            feature_size,
            activation=None,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(layer)
        layer = Add()([layer, blocks[i - 1]])  # residual connection
        layer = activation(layer)

        blocks.append(layer)

    prev_layer = blocks[-1]

    for i in range(3):
        output_size = 1 if i == 2 else feature_size
        prev_layer = Dense(
            output_size,
            activation=activation,
            use_bias=True,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )(prev_layer)

    sum_layer = Lambda(lambda x: K.sum(x, axis=1, keepdims=True),
                       name="FinalOutputLayer")(prev_layer)

    return Model(inputs=inputs, outputs=sum_layer)
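
# Unlike the single-input variants above, this model takes one flattened
# tensor per scale level. A minimal smoke test with hypothetical values:
import numpy as np

params = {
    "feature_shape": [4, 4, 4, 2],
    "scale_levels": [[64, 64], [128, 128], [256, 256]],
    "activation": "relu",
    "bias_initializer": "zeros",
    "kernel_initializer": "glorot_uniform",
}
model = make_model(params)
feature_size = reduce(mul, tuple(params["feature_shape"]))
batch = [np.zeros((1, feature_size), dtype="float32")
         for _ in params["scale_levels"]]
print(model.predict(batch).shape)  # (1, 1)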