Example no. 1
def eice(T):
    # Assumes: import numpy as np; import tensorflow as tf;
    # from tensorflow import math as tfm. Calls eliq from Example no. 3.
    # Saturation vapor pressure over ice from a polynomial fit in T - T0.
    a_ice = np.float32(np.array([0.252751365e-14, 0.146898966e-11, 0.385852041e-9,
                                 0.602588177e-7, 0.615021634e-5, 0.420895665e-3,
                                 0.188439774e-1, 0.503160820, 6.11147274]))
    c_ice = np.float32(np.array([273.15, 185, -100, 0.00763685, 0.000151069, 7.48215e-07]))
    T0 = np.float32(273.16)
    # tf.math.polyval expects a Python list of coefficients in recent TF versions.
    return tf.where(T > c_ice[0], eliq(T),
                    tf.where(T <= c_ice[1],
                             np.float32(100.0) * (c_ice[3] + tfm.maximum(c_ice[2], T - T0) *
                                                  (c_ice[4] + tfm.maximum(c_ice[2], T - T0) * c_ice[5])),
                             np.float32(100.0) * tfm.polyval(list(a_ice), T - T0)))
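
tf.math.polyval evaluates a polynomial given its coefficients highest degree first, which is how a_ice is ordered above. A minimal sketch with made-up coefficients, just to show the call shape:

import tensorflow as tf

# Hypothetical coefficients, highest degree first: p(x) = x**2 - 2x + 3.
coeffs = [1.0, -2.0, 3.0]
x = tf.constant([0.0, 1.0, 2.0])
print(tf.math.polyval(coeffs, x))  # -> [3., 2., 3.]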
Example no. 2
def esat(T):
    # Assumes the same numpy/TensorFlow imports as Example no. 1.
    # Blends eliq and eice linearly across the mixed-phase range T00 < T <= T0.
    T0 = np.float32(273.16)
    T00 = np.float32(253.16)
    omtmp = (T - T00) / (T0 - T00)
    omega = tfm.maximum(np.float32(0.0), tfm.minimum(np.float32(1.0), omtmp))

    return tf.where(T > T0, eliq(T),
                    tf.where(T < T00, eice(T),
                             omega * eliq(T) + (1 - omega) * eice(T)))
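
The omega weight is just (T - T00)/(T0 - T00) clamped to [0, 1]; the nested maximum/minimum is equivalent to tf.clip_by_value. A minimal sketch of the pattern:

import tensorflow as tf
from tensorflow import math as tfm

t = tf.constant([-0.5, 0.25, 1.5])
omega = tfm.maximum(0.0, tfm.minimum(1.0, t))
print(omega)                          # -> [0., 0.25, 1.]
print(tf.clip_by_value(t, 0.0, 1.0))  # same result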
Example no. 3
def eliq(T):
    # Assumes the same numpy/TensorFlow imports as Example no. 1.
    # Saturation vapor pressure over liquid water from a polynomial fit
    # in T - T0, with the argument clamped below at c_liq = -80.
    a_liq = np.float32(np.array([-0.976195544e-15, -0.952447341e-13,
                                 0.640689451e-10,
                                 0.206739458e-7, 0.302950461e-5, 0.264847430e-3,
                                 0.142986287e-1, 0.443987641, 6.11239921]))
    c_liq = np.float32(-80.0)
    T0 = np.float32(273.16)
    # tf.math.polyval expects a Python list of coefficients in recent TF versions.
    return np.float32(100.0) * tfm.polyval(list(a_liq), tfm.maximum(c_liq, T - T0))
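
With eliq and eice (Examples no. 3 and 1) and esat (Example no. 2) defined together in one module, a minimal usage sketch might look as follows; the kelvin temperatures are illustrative, one per regime:

import numpy as np
import tensorflow as tf
from tensorflow import math as tfm

# Ice (< 253.16 K), mixed phase, and liquid (> 273.16 K) regimes.
T = tf.constant([230.0, 260.0, 290.0], dtype=tf.float32)
print(esat(T))  # saturation vapor pressure for each regime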
Example no. 4
def energy_4_log_pdf(z):
    # `math` here is presumably tf.math (e.g. from tensorflow import math);
    # w1 and w3 are helper functions defined elsewhere in the project.
    z2 = z[:, 1]
    x1 = -0.5 * ((z2 - w1(z)) / 0.4)**2
    x2 = -0.5 * ((z2 - w1(z) + w3(z)) / 0.35)**2
    # Log-sum-exp trick: subtract the max before exponentiating for stability.
    a = math.maximum(x1, x2)
    exp1 = math.exp(x1 - a)
    exp2 = math.exp(x2 - a)
    return a + math.log(exp1 + exp2)
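
The closing a + log(exp(x1 - a) + exp(x2 - a)) is the standard log-sum-exp trick: since log(e^x1 + e^x2) = a + log(e^(x1-a) + e^(x2-a)) for any a, choosing a = max(x1, x2) keeps the exponents non-positive and avoids overflow. A minimal sketch on values that would overflow naively:

import tensorflow as tf
from tensorflow import math

x1 = tf.constant(1000.0)
x2 = tf.constant(998.0)
a = math.maximum(x1, x2)
stable = a + math.log(math.exp(x1 - a) + math.exp(x2 - a))
print(stable)                                 # ~1000.127
print(math.log(math.exp(x1) + math.exp(x2)))  # inf: the naive form overflows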
Example no. 5
def reloss(y_true, y_pred):
    """
    Custom loss that does not penalize the prediction when it is
    negative (dissimilar) and the true label is 0.
    """
    # Assumes maximum, divide_no_nan and multiply are bound to their
    # tf.math equivalents, and MAE to a mean-absolute-error loss such
    # as tf.keras.losses.MAE.
    loss_filter = maximum(y_true, y_pred)
    # x / x is 1 for any nonzero x, and divide_no_nan maps 0 / 0 to 0,
    # so for y_true in {0, 1} the filter is 0 exactly where y_true = 0
    # and y_pred <= 0.
    loss_filter = divide_no_nan(loss_filter, loss_filter)
    return multiply(loss_filter, MAE(y_true, y_pred))
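
The divide_no_nan(x, x) idiom is what turns loss_filter into a mask: any nonzero entry maps to 1 and 0/0 maps to 0 instead of NaN. A minimal sketch of just that pattern:

import tensorflow as tf

x = tf.constant([0.0, -0.3, 0.7])
mask = tf.math.divide_no_nan(x, x)
print(mask)  # -> [0., 1., 1.]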
Example no. 6
def _graph_update(self, sample, beta, seed, pos):
    batch_size = sample.shape[0]
    r = self.learn_range
    i, j = tf.unstack(pos)
    # Advance the stateless-RNG seed with a linear congruential step.
    seed.assign((seed * 1664525 + 1013904223) % 2**31)
    # Slice out the receptive field around site (i, j).
    begin = tf.stack([0, tfm.maximum(i - 1, 0), tfm.maximum(j - r, 0), 0])
    end = tf.stack([batch_size, i + 1, tfm.minimum(j + r + 1, self.L), 1])
    sub_sample = tf.strided_slice(sample, begin, end)
    x_hat = self.call(sub_sample, beta)
    i_h = tfm.minimum(i, 1)
    j_h = tfm.minimum(j, r)
    probs = 0.5 if i == 0 and j == 0 else x_hat[:, i_h, j_h, 0]
    indices = tf.stack([
        tf.range(batch_size), i * self.full_ones, j * self.full_ones,
        self.full_zeros
    ], 1)
    # Draw Bernoulli samples, map {0, 1} to {-1, +1}, and scatter-add
    # them into sample at site (i, j) for every batch element.
    updates = tf.random.stateless_binomial([batch_size], seed, 1., probs,
                                           tf.float32) * 2 - 1
    return tf.tensor_scatter_nd_add(sample, tf.cast(indices, tf.int32),
                                    updates)
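
The updates line relies on stateless_binomial with counts=1 being a Bernoulli draw, and on *2 - 1 mapping {0, 1} to {-1, +1} spins. A minimal sketch with a fixed seed:

import tensorflow as tf

seed = tf.constant([12, 34], dtype=tf.int32)
probs = tf.constant([0.1, 0.5, 0.9])
bern = tf.random.stateless_binomial(
    shape=[3], seed=seed, counts=1., probs=probs, output_dtype=tf.float32)
spins = bern * 2 - 1  # {0, 1} -> {-1, +1}
print(spins)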
Example no. 7
def call(self, x):
    if self.res:
        h_stack, v_stack = tf.unstack(x, axis=-1)
    else:
        h_stack = x
        v_stack = x
    # The vertical stack is processed by a vertical convolution
    # equivalent to a masked one.
    v_stack = self.ver_cropping(v_stack)
    v_stack = self.ver_padding(v_stack)
    v_stack = self.ver_conv(v_stack)

    # The horizontal stack is processed by a horizontal convolution
    # equivalent to a masked one; h_stack2 is kept for the residual path.
    h_stack2 = h_stack
    h_stack = self.hor_cropping(h_stack)
    h_stack = self.hor_padding(h_stack)
    h_stack = self.hor_conv(h_stack)

    # Connect the horizontal and vertical stacks.
    h_stack = tfm.add(h_stack, v_stack)

    # Apply a translationally invariant version of PReLU, built from
    # tf.maximum with a user-defined scalar variable as alpha, then convolve.
    h_stack = tfm.maximum(self.alpha_scalar * h_stack, h_stack)
    h_stack = self.sec_conv(h_stack)
    if not self.last_layer:
        v_stack = tfm.maximum(self.alpha_scalar2 * h_stack, h_stack)
        v_stack = self.sec_conv2(v_stack)

    # Make a residual connection between the input state and the output.
    if self.res == 1:
        h_stack2 = self.res_conv(h_stack2)
        h_stack = tfm.add(h_stack, h_stack2)

    if self.last_layer:
        output = h_stack
    else:
        output = tf.stack([h_stack, v_stack], axis=-1)
    return output
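
The tfm.maximum(alpha * x, x) line implements a PReLU with a single learnable scalar slope: for 0 < alpha < 1 it returns x where x is positive and alpha * x where x is negative, identically at every spatial position. A minimal sketch:

import tensorflow as tf
from tensorflow import math as tfm

alpha = tf.Variable(0.2)  # learnable scalar slope, as in the layer above
x = tf.constant([-2.0, -0.5, 1.0])
print(tfm.maximum(alpha * x, x))  # -> [-0.4, -0.1, 1.]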
Example no. 8
File: util.py Project: xclmj/pinn
def interpolate(grid, query_points, indexing="ij", name=None):
    """
    Reference: https://github.com/tensorflow/addons/blob/master/tensorflow_addons/image/dense_image_warp.py
    
    Similar to Matlab's interp2 function.
    Finds values for query points on a grid using bilinear interpolation.
    Args:
      grid: a 4-D float `Tensor` of shape `[batch, height, width, channels]`.
      query_points: a 3-D float `Tensor` of N points with shape
        `[batch, N, 2]`.
      indexing: whether the query points are specified as row and column (ij),
        or Cartesian coordinates (xy).
      name: a name for the operation (optional).
    Returns:
      values: a 3-D `Tensor` with shape `[batch, N, channels]`
    Raises:
      ValueError: if the indexing mode is invalid, or if the shape of the
        inputs invalid.
    """
    if indexing != "ij" and indexing != "xy":
        raise ValueError("Indexing mode must be \'ij\' or \'xy\'")

    with name_scope(name or "interpolate_bilinear"):
        grid = convert_to_tensor(grid)
        query_points = convert_to_tensor(query_points)

        if len(grid.shape) != 4:
            msg = "Grid must be 4 dimensional. Received size: "
            raise ValueError(msg + str(grid.shape))

        if len(query_points.shape) != 3:
            raise ValueError("Query points must be 3 dimensional.")

        if query_points.shape[2] is not None and query_points.shape[2] != 2:
            raise ValueError("Query points must be size 2 in dim 2.")

        if grid.shape[1] is not None and grid.shape[1] < 2:
            raise ValueError("Grid height must be at least 2.")

        if grid.shape[2] is not None and grid.shape[2] < 2:
            raise ValueError("Grid width must be at least 2.")

        grid_shape = shape(grid)
        query_shape = shape(query_points)

        batch_size, height, width, channels = (grid_shape[0], grid_shape[1],
                                               grid_shape[2], grid_shape[3])

        shape_list = [batch_size, height, width, channels]

        # pylint: disable=bad-continuation
        with control_dependencies([
                assert_equal(query_shape[2],
                             2,
                             message="Query points must be size 2 in dim 2.")
        ]):
            num_queries = query_shape[1]
        # pylint: enable=bad-continuation

        query_type = query_points.dtype
        grid_type = grid.dtype

        # pylint: disable=bad-continuation
        with control_dependencies([
                assert_greater_equal(
                    height, 2, message="Grid height must be at least 2."),
                assert_greater_equal(width,
                                     2,
                                     message="Grid width must be at least 2."),
        ]):
            alphas = []
            floors = []
            ceils = []
            index_order = [0, 1] if indexing == "ij" else [1, 0]
            unstacked_query_points = unstack(query_points, axis=2)
        # pylint: enable=bad-continuation

        for dim in index_order:
            with name_scope("dim-" + str(dim)):
                queries = unstacked_query_points[dim]

                size_in_indexing_dimension = shape_list[dim + 1]

                # max_floor is size_in_indexing_dimension - 2 so that max_floor + 1
                # is still a valid index into the grid.
                max_floor = cast(size_in_indexing_dimension - 2, query_type)
                min_floor = constant(0.0, dtype=query_type)
                floor_val = minimum(maximum(min_floor, floor(queries)),
                                    max_floor)
                int_floor = cast(floor_val, int32)
                floors.append(int_floor)
                ceil = int_floor + 1
                ceils.append(ceil)

                # alpha has the same type as the grid, as we will directly use alpha
                # when taking linear combinations of pixel values from the image.
                alpha = cast(queries - floor_val, grid_type)
                min_alpha = constant(0.0, dtype=grid_type)
                max_alpha = constant(1.0, dtype=grid_type)
                alpha = minimum(maximum(min_alpha, alpha), max_alpha)

                # Expand alpha to [b, n, 1] so we can use broadcasting
                # (since the alpha values don't depend on the channel).
                alpha = expand_dims(alpha, 2)
                alphas.append(alpha)

        # pylint: disable=bad-continuation
        with control_dependencies([
                assert_less_equal(
                    cast(batch_size * height * width, dtype=float32),
                    np.iinfo(np.int32).max / 8.0,
                    message="The image size or batch size is sufficiently "
                    "large that the linearized addresses used by tf.gather "
                    "may exceed the int32 limit.")
        ]):
            flattened_grid = reshape(grid,
                                     [batch_size * height * width, channels])
            batch_offsets = reshape(
                tfrange(batch_size) * height * width, [batch_size, 1])
        # pylint: enable=bad-continuation

        # This wraps tf.gather. We reshape the image data such that the
        # batch, y, and x coordinates are pulled into the first dimension.
        # Then we gather. Finally, we reshape the output back. It's possible this
        # code would be made simpler by using tf.gather_nd.
        def gather_fn(y_coords, x_coords, name):
            with name_scope("gather-" + name):
                linear_coordinates = (batch_offsets + y_coords * width +
                                      x_coords)
                gathered_values = gather(flattened_grid, linear_coordinates)
                return reshape(gathered_values,
                               [batch_size, num_queries, channels])

        # grab the pixel values in the 4 corners around each query point
        top_left = gather_fn(floors[0], floors[1], "top_left")
        top_right = gather_fn(floors[0], ceils[1], "top_right")
        bottom_left = gather_fn(ceils[0], floors[1], "bottom_left")
        bottom_right = gather_fn(ceils[0], ceils[1], "bottom_right")

        # now, do the actual interpolation
        with name_scope("interpolate"):
            interp_top = alphas[1] * (top_right - top_left) + top_left
            interp_bottom = alphas[1] * (bottom_right -
                                         bottom_left) + bottom_left
            interp = alphas[0] * (interp_bottom - interp_top) + interp_top

        return interp
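
A minimal usage sketch, assuming the bare names in util.py are bound to their TensorFlow equivalents (e.g. tfrange = tf.range and so on); the grid and query values below are illustrative:

import tensorflow as tf

# A 1 x 2 x 2 x 1 grid holding the values [[0, 1], [2, 3]].
grid = tf.reshape(tf.constant([0., 1., 2., 3.]), [1, 2, 2, 1])
# One query at the cell center, in (row, col) = "ij" indexing.
query = tf.constant([[[0.5, 0.5]]])
print(interpolate(grid, query, indexing="ij"))  # -> [[[1.5]]]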