Example 1
def _mg_solve_forward(divergence, domain, pressure_guess, solvers):
    # Assumes `numpy as np`, `logging`, phi's `math` and `FluidDomain` are
    # imported in the enclosing module.
    fluid_mask = domain.accessible_tensor(extend=1)
    active_mask = domain.active_tensor(extend=1)
    if active_mask is not None or fluid_mask is not None:
        if not np.all([s.supports_continuous_masks for s in solvers[:-1]]):
            logging.warning(
                "MultiscaleSolver: there are boundary conditions inside the domain but "
                "not all intermediate solvers support continuous masks")
    # Coarse-to-fine pyramids; index 0 always holds the coarsest level.
    div_lvls = [divergence]
    act_lvls = [active_mask]
    fld_lvls = [fluid_mask]
    for grid_i in range(len(solvers) - 1):
        div_lvls.insert(0, math.downsample2x(div_lvls[0]))
        act_lvls.insert(
            0,
            math.downsample2x(act_lvls[0])
            if act_lvls[0] is not None else None)
        fld_lvls.insert(
            0,
            math.downsample2x(fld_lvls[0])
            if fld_lvls[0] is not None else None)
        if pressure_guess is not None:
            pressure_guess = math.downsample2x(pressure_guess)

    iter_list = []
    # Solve from coarsest to finest; each level's solution seeds the next finer one.
    for i, div in enumerate(div_lvls):
        # NOTE: `boundaries` is not defined in this snippet; in the original
        # source it comes from the enclosing scope.
        pressure_guess, iteration = solvers[i].solve(
            div, FluidDomain(act_lvls[i], fld_lvls[i], boundaries),
            pressure_guess)
        iter_list.append(iteration)
        if pressure_guess.shape[1] < divergence.shape[1]:
            # Rescale when moving to the finer grid (factor 2 per spatial dimension).
            pressure_guess = math.upsample2x(
                pressure_guess) * 2**math.spatial_rank(divergence)

    return pressure_guess, iter_list
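
The function above builds its multigrid pyramid with math.downsample2x and seeds each finer level with the upsampled coarse solution. As a minimal, self-contained sketch of the same pyramid construction, assuming the phiflow 2 tensor API (the names div and pyramid are illustrative, not from the original source):

from phi import math

div = math.random_normal(math.spatial(x=64, y=64))  # stand-in for a divergence field
pyramid = [div]
for _ in range(2):  # two coarser levels, matching a three-solver setup
    pyramid.insert(0, math.downsample2x(pyramid[0]))  # halves each spatial dimension
# pyramid now holds tensors of resolution 16x16, 32x32 and 64x64, coarsest first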
Example 2
def upsample_apply(params, inputs, **kwargs):
    # Wrap the raw (batch, spatial..., channels) array in a named-dimension tensor,
    # double every spatial dimension via linear interpolation, then unwrap it again.
    x = math.wrap(
        inputs, math.batch('batch'),
        *[math.spatial(f'{i}') for i in range(len(inputs.shape) - 2)],
        math.channel('vector'))
    x = math.upsample2x(x)
    return x.native(x.shape)
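
A hedged usage sketch for the layer above: it accepts a raw (batch, spatial..., channels) array and returns one with doubled spatial dimensions; the params argument is unused, as in typical stax-style apply functions. The concrete shapes below are illustrative:

import numpy as np

x = np.random.randn(1, 16, 16, 3)  # (batch, y, x, channels)
y = upsample_apply(None, x)        # -> shape (1, 32, 32, 3)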
Example 3
def test_upsample2x(self):
    meshgrid = math.meshgrid(x=(0, 1, 2, 3), y=(0, -1, -2))
    double_size = math.upsample2x(meshgrid, extrapolation.BOUNDARY)
    same_size = math.downsample2x(double_size)
    math.print(meshgrid, 'Normal size')
    math.print(double_size, 'Double size')
    math.print(same_size, 'Same size')
    # Up- then downsampling is only exact away from the boundary,
    # so only the interior values are compared.
    math.assert_close(meshgrid.x[1:-1].y[1:-1], same_size.x[1:-1].y[1:-1])
Example 4
def upsample2x(grid: GridType) -> GridType:
    if isinstance(grid, CenteredGrid):
        values = math.upsample2x(grid.values, grid.extrapolation)
        return CenteredGrid(values, grid.bounds, grid.extrapolation)
    elif isinstance(grid, StaggeredGrid):
        raise NotImplementedError()
    else:
        raise ValueError(type(grid))
Example 5
def upsample2x(grid: GridType) -> GridType:
    """
    Increases the number of sample points by a factor of 2 in each spatial dimension.
    The new values are determined via linear interpolation.

    See Also:
        `downsample2x()`.

    Args:
        grid: `CenteredGrid` or `StaggeredGrid`. (The staggered case is not implemented below and raises `NotImplementedError`.)

    Returns:
        `Grid` of same type as `grid`.
    """
    if isinstance(grid, CenteredGrid):
        values = math.upsample2x(grid.values, grid.extrapolation)
        return CenteredGrid(values,
                            bounds=grid.bounds,
                            extrapolation=grid.extrapolation)
    elif isinstance(grid, StaggeredGrid):
        raise NotImplementedError()
    else:
        raise ValueError(type(grid))
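
A short usage sketch for this grid-level function, assuming the phiflow 2 CenteredGrid constructor with resolution keywords (the concrete sizes are illustrative):

from phi.flow import CenteredGrid, extrapolation

coarse = CenteredGrid(0, extrapolation.ZERO, x=32, y=32)  # zero-filled 32x32 grid
fine = upsample2x(coarse)  # 64x64 grid with the same bounds and extrapolation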
Example 6
def pressure_unet(divergence, scope="pressure_unet"):
    """U-Net-style pressure network: three stride-2 encoder levels, a convolutional bottleneck, and three upsampling levels with skip connections."""
    with tf.variable_scope(scope):
        x = divergence

        print(x.shape)

        # DownConv Level 1
        c1 = tf.layers.conv2d(x, 4, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv1", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c2 = tf.layers.conv2d(c1, 4, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv2", trainable=True,
                              reuse=tf.AUTO_REUSE)

        c3 = tf.layers.conv2d(c2, 4, 5, strides=2, activation=tf.nn.relu, padding="same", name="conv3", trainable=True,
                              reuse=tf.AUTO_REUSE)

        print(c3.shape)

        # DownConv Level 2
        c4 = tf.layers.conv2d(c3, 8, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv4", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c5 = tf.layers.conv2d(c4, 8, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv5", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c6 = tf.layers.conv2d(c5, 8, 5, strides=2, activation=tf.nn.relu, padding="same", name="conv6", trainable=True,
                              reuse=tf.AUTO_REUSE)
        print(c6.shape)

        # DownConv Level 3
        c7 = tf.layers.conv2d(c6, 16, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv7", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c8 = tf.layers.conv2d(c7, 16, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv8", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c9 = tf.layers.conv2d(c8, 16, 5, strides=2, activation=tf.nn.relu, padding="same", name="conv9", trainable=True,
                              reuse=tf.AUTO_REUSE)

        print(c9.shape)

        # Lowest Convolutions
        c10 = tf.layers.conv2d(c9, 32, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv10", trainable=True,
                               reuse=tf.AUTO_REUSE)
        c11 = tf.layers.conv2d(c10, 32, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv11", trainable=True,
                               reuse=tf.AUTO_REUSE)
        c12 = tf.layers.conv2d(c11, 32, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv12", trainable=True,
                               reuse=tf.AUTO_REUSE)

        print(c12.shape)

        # UpConv Level 3
        u1 = upsample2x(c12)
        uc1 = tf.layers.conv2d(tf.concat([u1, c8], 3), 16, 5, strides=1, activation=tf.nn.relu, padding="same",
                               name="upconv1", trainable=True, reuse=tf.AUTO_REUSE)
        uc2 = tf.layers.conv2d(uc1, 16, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv2",
                               trainable=True, reuse=tf.AUTO_REUSE)
        uc3 = tf.layers.conv2d(uc2, 16, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv3",
                               trainable=True, reuse=tf.AUTO_REUSE)

        print(uc3.shape)

        # UpConv Level 2
        u2 = upsample2x(uc3)
        uc4 = tf.layers.conv2d(tf.concat([u2, c5], 3), 8, 5, strides=1, activation=tf.nn.relu, padding="same",
                               name="upconv4", trainable=True, reuse=tf.AUTO_REUSE)
        uc5 = tf.layers.conv2d(uc4, 8, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv5",
                               trainable=True, reuse=tf.AUTO_REUSE)
        uc6 = tf.layers.conv2d(uc5, 8, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv6",
                               trainable=True, reuse=tf.AUTO_REUSE)

        print(uc6.shape)

        # UpConv Level 1
        u3 = upsample2x(uc6)
        uc7 = tf.layers.conv2d(tf.concat([u3, c2], 3), 4, 5, strides=1, activation=tf.nn.relu, padding="same",
                               name="upconv7", trainable=True, reuse=tf.AUTO_REUSE)
        uc8 = tf.layers.conv2d(uc7, 4, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv8",
                               trainable=True, reuse=tf.AUTO_REUSE)
        uc9 = tf.layers.conv2d(uc8, 4, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv9",
                               trainable=True, reuse=tf.AUTO_REUSE)

        print(uc9.shape)

        # Final convolution
        out = tf.layers.conv2d(uc9, 1, 5, strides=1, activation=None, padding="same", name="out_conv", trainable=True,
                               reuse=tf.AUTO_REUSE)

        return out
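
Because the encoder applies three stride-2 convolutions and each decoder level concatenates with the matching encoder output, the spatial size of divergence must be divisible by 8 for the skip concatenations to line up. A hypothetical TF1-style invocation, assuming the upsample2x helper used inside doubles height and width:

div_in = tf.placeholder(tf.float32, [None, 64, 64, 1])  # NHWC, 64 is divisible by 8
pressure = pressure_unet(div_in)  # same spatial shape, one output channel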
Example 7
def pressure_unet(divergence, geometry_mask=None, scope="pressure_unet"):
    """ Network structure (Based on U-Net) """
    with tf.variable_scope(scope):

        x = divergence

        if geometry_mask is not None:
            g_mask = tf.broadcast_to(geometry_mask, tf.shape(x))  # broadcast same mask across batch
            x = tf.concat([x, g_mask], axis=3)  # concatenate mask to divergence

        print(x.shape)

        # DownConv Level 1
        c1 = tf.layers.conv2d(x, 4, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv1", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c2 = tf.layers.conv2d(c1, 4, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv2", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c3 = tf.layers.conv2d(c2, 4, 5, strides=2, activation=tf.nn.relu, padding="same", name="conv3", trainable=True,
                              reuse=tf.AUTO_REUSE)

        print(c3.shape)

        # DownConv Level 2
        c4 = tf.layers.conv2d(c3, 8, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv4", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c5 = tf.layers.conv2d(c4, 8, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv5", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c6 = tf.layers.conv2d(c5, 8, 5, strides=2, activation=tf.nn.relu, padding="same", name="conv6", trainable=True,
                              reuse=tf.AUTO_REUSE)
        print(c6.shape)

        # DownConv Level 3
        c7 = tf.layers.conv2d(c6, 16, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv7", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c8 = tf.layers.conv2d(c7, 16, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv8", trainable=True,
                              reuse=tf.AUTO_REUSE)
        c9 = tf.layers.conv2d(c8, 16, 5, strides=2, activation=tf.nn.relu, padding="same", name="conv9", trainable=True,
                              reuse=tf.AUTO_REUSE)

        print(c9.shape)

        # Lowest Convolutions
        c10 = tf.layers.conv2d(c9, 32, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv10", trainable=True,
                               reuse=tf.AUTO_REUSE)
        c11 = tf.layers.conv2d(c10, 32, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv11", trainable=True,
                               reuse=tf.AUTO_REUSE)
        c12 = tf.layers.conv2d(c11, 32, 5, strides=1, activation=tf.nn.relu, padding="same", name="conv12", trainable=True,
                               reuse=tf.AUTO_REUSE)

        print(c12.shape)

        # UpConv Level 3
        u1 = upsample2x(c12)
        uc1 = tf.layers.conv2d(tf.concat([u1, c8], 3), 16, 5, strides=1, activation=tf.nn.relu, padding="same",
                               name="upconv1", trainable=True, reuse=tf.AUTO_REUSE)
        uc2 = tf.layers.conv2d(uc1, 16, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv2",
                               trainable=True, reuse=tf.AUTO_REUSE)
        uc3 = tf.layers.conv2d(uc2, 16, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv3",
                               trainable=True, reuse=tf.AUTO_REUSE)

        print(uc3.shape)

        # UpConv Level 2
        u2 = upsample2x(uc3)
        uc4 = tf.layers.conv2d(tf.concat([u2, c5], 3), 8, 5, strides=1, activation=tf.nn.relu, padding="same",
                               name="upconv4", trainable=True, reuse=tf.AUTO_REUSE)
        uc5 = tf.layers.conv2d(uc4, 8, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv5",
                               trainable=True, reuse=tf.AUTO_REUSE)
        uc6 = tf.layers.conv2d(uc5, 8, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv6",
                               trainable=True, reuse=tf.AUTO_REUSE)

        print(uc6.shape)

        # UpConv Level 1
        u3 = upsample2x(uc6)
        uc7 = tf.layers.conv2d(tf.concat([u3, c2], 3), 4, 5, strides=1, activation=tf.nn.relu, padding="same",
                               name="upconv7", trainable=True, reuse=tf.AUTO_REUSE)
        uc8 = tf.layers.conv2d(uc7, 4, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv8",
                               trainable=True, reuse=tf.AUTO_REUSE)
        uc9 = tf.layers.conv2d(uc8, 4, 5, strides=1, activation=tf.nn.relu, padding="same", name="upconv9",
                               trainable=True, reuse=tf.AUTO_REUSE)

        print(uc9.shape)

        # Final convolution
        out = tf.layers.conv2d(uc9, 1, 5, strides=1, activation=None, padding="same", name="out_conv", trainable=True,
                               reuse=tf.AUTO_REUSE)

        print(out.shape)

        return out
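
This variant differs from Example 6 only in the optional geometry mask, which is broadcast across the batch and appended as a second input channel. A hypothetical invocation (the mask semantics are illustrative):

div_in = tf.placeholder(tf.float32, [None, 64, 64, 1])
mask = tf.placeholder(tf.float32, [1, 64, 64, 1])  # e.g. 1 = fluid, 0 = obstacle
pressure = pressure_unet(div_in, geometry_mask=mask)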
Example 8
def u_net(domain, input_fields, output_field, levels=2, filters=16, blocks_per_level=2, skip_combine='concat', training=False, trainable=True, reuse=None):
    """
Restrictions:
- 2D only
- Domain resolution must be multiple of 2**levels

    :param skip_combine: 'concat'
    :param blocks_per_level: number of residual blocks per level
    :param filters: Number of convolutional filters
    :type filters: int or tuple or list
    :param levels: number of additional resolution levels, equals number of downsampling / upsampling operations
    :param domain: the u-net is executed on this domain.
    :type domain: Domain
    :param input_fields: list of Fields to be passed to the network as input
    :param output_field: determines sample points of the result
    :param training: whether the network is executed in training or inference mode
    :param trainable: whether the weights of the network are trainable
    :param reuse: whether to reuse weights from previous unet calls
    :return: Field sampled like output_field
    """
    assert isinstance(domain, Domain)
    assert isinstance(output_field, Field)
    net_inputs = []
    for input_field in input_fields:
        assert isinstance(input_field, Field)
        resampled = input_field.at(domain)
        net_inputs.append(resampled)
    y = CenteredGrid.sample(math.concat([math.to_float(grid.data) for grid in net_inputs], axis=-1), domain)
    # --- Execute network ---
    pad_width = sum([2 ** i for i in range(levels)])  # = 2**levels - 1 extra cells per dimension
    y = y.padded([[0, pad_width]] * domain.rank)
    resolutions = [y]
    for level in range(levels):
        level_filters = filters if isinstance(filters, int) else filters[level]
        y = conv_layer(resolutions[0], level_filters, 2, strides=2, activation=tf.nn.relu, padding='valid', name='down_convolution_%d' % level, trainable=trainable, reuse=reuse)
        for i in range(blocks_per_level):
            y = residual_block(y, level_filters, name='down_res_block_%d_%d' % (level, i), training=training, trainable=trainable, reuse=reuse)
        resolutions.insert(0, y)

    y = resolutions.pop(0)
    assert np.all(y.box.size == domain.box.size)

    for level in range(levels):
        y = math.upsample2x(y)
        res_in = resolutions.pop(0)
        res_in = res_in.at(y)  # No resampling required, simply shaving off the top rows
        if skip_combine == 'concat':
            y = y.with_data(math.concat([y.data, res_in.data], axis=-1))
        else:
            # Additive skip connections (y = y + res_in) are not implemented yet.
            raise NotImplementedError(skip_combine)
        y = y.padded([[0, 1], [0, 1]])
        if resolutions:
            level_filters = filters if isinstance(filters, int) else list(reversed(filters))[level]
            y = conv_layer(y, level_filters, kernel_size=2, activation=tf.nn.relu, padding='valid', name='up_convolution_%d' % level, trainable=trainable, reuse=reuse)
            for i in range(blocks_per_level):
                y = residual_block(y, level_filters, name='up_res_block_%d_%d' % (level, i), training=training, trainable=trainable, reuse=reuse)
        else:  # Last iteration
            y = conv_layer(y, output_field.component_count, kernel_size=2, activation=None, padding='valid', name='up_convolution_%d' % level, trainable=trainable, reuse=reuse)
    result = y.at(output_field)
    return result
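
To see how the pad_width bookkeeping plays out, here is a small arithmetic sketch, not part of the original code, tracing the per-dimension sizes for levels=2 on a 64x64 domain:

def down_size(n):  # 'valid' convolution, kernel 2, stride 2
    return (n - 2) // 2 + 1

def up_size(n):  # upsample2x, pad by 1, then 'valid' convolution, kernel 2, stride 1
    return (2 * n + 1) - 1

n = 64 + (2**0 + 2**1)       # pad_width = 3, so 64 -> 67
n = down_size(down_size(n))  # 67 -> 33 -> 16 on the way down
n = up_size(up_size(n))      # 16 -> 32 -> 64: the original resolution is recovered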