Example #1
def calculate_potentials(
    r: np.ndarray, potential_law=lenard_jones_potential, out=None, *args, **kwargs
):
    """

    Parameters
    ----------
    r :
        Nx3 array of particle positions
    args :
    kwargs :
        passed along to the force law

    Notes
    -----
    1. get a NxNx3 antisymmetric (upper triangular) matrix of vector distances
    2a. from 1 get a normalized NxNx3 antisymmetric (matrix of direction vectors
    2b. from 1 get a NxN (upper triangular due to symmetry) matrix of scalar distances
    3b. get a NxN matrix of force magnitudes (reshapable to
    3. multiply 2a by 3b to get forces
    4. update existing force matrix

    Returns
    -------

    """
    # TODO optimize with upper triangular matrix
    N = r.shape[0]
    rij = r.reshape(N, 1, 3) - r.reshape(1, N, 3)
    distances_ij = np.sqrt(np.sum(rij ** 2, axis=2, keepdims=True))
    distances_ij[np.arange(N), np.arange(N), :] = np.inf
    potentials = potential_law(distances_ij, *args, **kwargs)
    return potentials.sum() / 2
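The potential law itself is not shown; a minimal sketch of a compatible
`lenard_jones_potential` (the `epsilon`/`sigma` parameter names are
illustrative assumptions) might look like:

def lenard_jones_potential(r, epsilon=1.0, sigma=1.0):
    """Lennard-Jones pair potential, applied element-wise to a distance matrix."""
    sr6 = (sigma / r) ** 6  # the np.inf diagonal maps to 0, excluding self-terms
    return 4.0 * epsilon * (sr6 ** 2 - sr6)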
Example #2
def _calculate_gumbel_poly(lx: np.ndarray, alpha: float, d: int, method: str, log: bool):
    """Inner function that does the actual Gumbel polynomial calculation"""
    k = np.arange(d) + 1

    if method == 'pois':
        n = len(lx)
        x = np.exp(lx)  # n x 1 vector

        lppois = np.array([poisson.logcdf(d - k, xx) for xx in x]).T  # d x n matrix
        llx = k.reshape(-1, 1) @ lx.reshape(1, -1)  # d x n matrix
        labs_poch = np.array([np.sum(np.log(np.abs(alpha * j - (k - 1)))) for j in k])
        lfac = gammaln(k + 1)  # d x 1 vector

        lxabs = llx + lppois + np.tile(labs_poch - lfac, (n, 1)).T + np.tile(x, (d, 1))

        signs = sign_ff(alpha, k, d)
        offset = np.max(lxabs, 0)
        sum_ = np.sum(signs[:, None] * np.exp(lxabs - offset[None, :]), 0)
        res = np.log(sum_) + offset

        return res if log else np.exp(res)
    elif method in ('direct', 'log', 'sort'):
        log_a_dk = gumbel_coef(d, alpha, method, True)

        log_x = log_a_dk[:, None] + k.reshape(-1, 1) @ lx.reshape(1, -1)
        x = np.exp(log_x).sum(0)
        return np.log(x) if log else x
    else:
        raise ValueError(f"Unknown <method>: {method}. Use one of pois, direct, log, sort")
Example #3
def ink(x: np.ndarray, y: np.ndarray, degree: int, a: int = -3) -> np.ndarray:
    assert _is_integer(degree) and degree > 0, "Degree must be positive integer"
    assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray), "X and Y must be numpy arrays"
    if len(x.shape) == 1:
        x = x.reshape(1, x.shape[0])
    if len(y.shape) == 1:
        y = y.reshape(1, y.shape[0])
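    # clip entries below the floor a (note: this mutates the caller's arrays in place)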
    x[x < a] = a
    y[y < a] = a
    return K(x, y, degree, a) / np.sqrt(K_norm(x, y, degree, a, "x") * K_norm(x, y, degree, a, "y"))
Example #4
def new_ink(X: np.ndarray, Y: np.ndarray, degree: int, a: int = -3) -> np.ndarray:
    assert _is_integer(degree) and degree > 0, "Degree must be positive integer"
    assert isinstance(X, np.ndarray) and isinstance(Y, np.ndarray), "X and Y must be numpy arrays"
    if len(X.shape) == 1:
        X = X.reshape(1, X.shape[0])
    if len(Y.shape) == 1:
        Y = Y.reshape(1, Y.shape[0])
    X[X < a] = a
    Y[Y < a] = a
    return new_K(X, Y, degree, a) / new_K_norm(X, Y, degree, a)
Example #5
    def __find_dominant_colors(image: np.ndarray, n: int):
        """
         Znajduje n najbardziej dominujących kolorów na obrazie.

        :param np.ndarray image: badany obraz.
        :param int n: liczba poszukiwanych kolorów.
        :return: Znalezionye kolory, procent powierzni kolorów.
        :rtype: list, list, np.ndarray
        """

        # Reshape the image into a list of pixels.
        image = image.reshape((image.shape[0] * image.shape[1], 3))

        # Cluster the pixels into groups.
        clt = KMeans(n_clusters=n)
        clt.fit(image)

        # Compute the histogram of the clustered image.
        hist = ColorDetector.__centroid_histogram(clt)

        # Convert and round the results.
        percents = hist
        colors = clt.cluster_centers_  # BGR

        color_bar = ColorDetector.__create_colors_bar(hist, clt.cluster_centers_)

        result_colors = []
        result_percents = []

        for col in colors:
            result_colors.append(((int(col[0])), int(col[1]), int(col[2])))
        for pen in percents:
            result_percents.append(round(pen, 3))

        return result_colors, result_percents, color_bar
Example #6
    def package_value(
            self,
            value: np.ndarray,  # data
            name: str,  # NDVar name
            info: dict = None,  # NDVar info
            meas: str = None,  # for NDVar info
    ):
        if not self.yshape:
            return value[0]

        # shape
        has_vector = value.shape[0] > self.yshape[0]
        if self.vector_dim and not has_vector:
            dims = self.ydims[:-1]
            shape = self.yshape[:-1]
        else:
            dims = self.ydims
            shape = self.yshape
        if not dims:
            return value[0]
        elif len(shape) > 1:
            value = value.reshape(shape)

        # info
        if meas:
            info = _info.for_stat_map(meas, old=info)
        elif info is None:
            info = self.y_info

        return NDVar(value, dims, info, name)
Example #7
def multidimensional_lambdify(variables, expression: ndarray):
    shape = expression.shape
    n = prod(shape)
    expression = tuple(expression.reshape((n,)))  # tuple required because lambdify does not work on ndarray

    function = lambdify(variables, expression)

    return lambda *values: reshape(function(*values), shape)
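A usage sketch, assuming sympy symbols (the expression array and the inputs
are illustrative):

from sympy import symbols, sin, cos
import numpy as np

x, y = symbols("x y")
exprs = np.array([[sin(x), cos(y)], [x * y, x + y]])
f = multidimensional_lambdify((x, y), exprs)
f(0.0, np.pi)  # array([[0., -1.], [0., 3.14159...]]), reshaped back to (2, 2)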
Example #8
    def __init__(self, args: list = None, values: np.ndarray = None):
        if values is not None:
            self.values = values.reshape((self.max_arg_num, self.depth))
        else:
            self.values = np.zeros((self.max_arg_num, self.depth), dtype=np.float32)

        if args:
            for i, v in enumerate(args):
                self.update_to(i, v)
Example #9
    def __init__(self, train_x: np.ndarray, train_y: np.ndarray, features_name=None, do_standardization=True):
        # ensure that train_y is (N x 1)
        train_y = train_y.reshape((train_y.shape[0], 1))
        self.train_x = train_x
        self._raw_train_x = train_x.copy()
        self._raw_train_y = train_y.copy()
        self.train_y = train_y
        self.features_name = features_name

        self.do_standardization = do_standardization
        self._x_std_ = None
        self._x_mean_ = None
Example #10
def multitask_to_tuples(x: np.ndarray, y: np.ndarray, intercept=True):
    if y.ndim == 1:
        y = y.reshape((-1, 1))
    n_cat = y.shape[1]
    data = []
    for i in range(x.shape[0]):
        if intercept:
            inputs = [((0, n_cat + 1), 1.0)] + [((0, j + n_cat + 2), x[i, j]) for j in range(x.shape[1])]
        else:
            inputs = [((0, j + n_cat + 1), x[i, j]) for j in range(x.shape[1])]
        outputs = [((0, cat + 1), y[i, cat]) for cat in range(y.shape[1])]
        data.append((inputs, outputs))
    return data
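A small worked call, to make the sparse tuple layout concrete (the values
follow directly from the indexing above):

x = np.array([[0.5, 2.0]])  # one sample, two features
y = np.array([1.0])         # one output category
multitask_to_tuples(x, y, intercept=True)
# [([((0, 2), 1.0), ((0, 3), 0.5), ((0, 4), 2.0)],  # intercept + features
#   [((0, 1), 1.0)])]                               # outputs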
Example #11
def write_ndarray_to_tsv(output_file: str,
                         array: np.ndarray,
                         comment=io_consts.default_comment_char,
                         delimiter=io_consts.default_delimiter_char,
                         extra_comment_lines: Optional[List[str]] = None,
                         header: Optional[str] = None,
                         write_shape_info: bool = True) -> None:
    """Write an vector or matrix ndarray to .tsv file.

    Note:
        Shape and dtype information are stored in the header.

    Args:
        output_file: output .tsv file
        array: array to write to .tsv
        comment: comment character
        delimiter: delimiter character
        extra_comment_lines: (optional) list of extra comment lines to add to the header
        header: header line (e.g. for representing the ndarray as a table with named columns)
        write_shape_info: if True, ndarray shape info will be written to the header

    Returns:
        None
    """
    array = np.asarray(array)
    assert array.ndim <= 2
    shape = array.shape
    dtype = array.dtype
    if array.ndim == 2:
        array_matrix = array
    else:
        array_matrix = array.reshape((array.size, 1))

    with open(output_file, 'w') as f:
        if write_shape_info:
            f.write(comment + 'shape=' + repr(shape) + '\n')
            f.write(comment + 'dtype=' + str(dtype) + '\n')
        if extra_comment_lines is not None:
            for comment_line in extra_comment_lines:
                f.write(comment + comment_line + '\n')
        if header is not None:
            f.write(header + '\n')
        for i_row in range(array_matrix.shape[0]):
            row = array_matrix[i_row, :]
            row_repr = delimiter.join([repr(x) for x in row])
            f.write(row_repr + '\n')
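A minimal usage sketch (passing the comment and delimiter characters
explicitly instead of relying on the `io_consts` defaults; the file name is
illustrative):

arr = np.array([[1.0, 2.5], [3.0, 4.0]])
write_ndarray_to_tsv("means.tsv", arr, comment="@", delimiter="\t",
                     header="colA\tcolB")
# means.tsv begins with "@shape=(2, 2)" and "@dtype=float64", then the header
# line and one tab-separated row per matrix row.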
Example #12
def xyz_to_luv(xyz_nd: ndarray) -> ndarray:
    flat_shape = (xyz_nd.size // 3, 3)
    luv_flat = np.zeros(flat_shape, dtype=float)  # flattened luv n-dim array
    xyz_flat = xyz_nd.reshape(flat_shape)
    X, Y, Z = (xyz_flat[..., n] for n in range(3))

    with np.errstate(invalid="ignore"):  # ignore divide by zero
        U_var = ne.evaluate("(4 * X) / (X + (15 * Y) + (3 * Z))")
        V_var = ne.evaluate("(9 * Y) / (X + (15 * Y) + (3 * Z))")
    U_var[np.isinf(U_var)] = 0  # correct divide by zero
    V_var[np.isinf(V_var)] = 0  # correct divide by zero

    L, U, V = (luv_flat[..., n] for n in range(3))
    L[:] = _f(Y)
    ref_u, ref_v = constants.REF_U, constants.REF_V
    U[:] = ne.evaluate("L * 13 * (U_var - ref_u)")
    V[:] = ne.evaluate("L * 13 * (V_var - ref_v)")
    luv_flat[np.isnan(luv_flat)] = 0
    return luv_flat.reshape(xyz_nd.shape)
Example #13
def xyz_to_luv(xyz_nd: ndarray) -> ndarray:
    flat_shape = (xyz_nd.size // 3, 3)
    luv_flat = np.zeros(flat_shape, dtype=float)  # flattened luv n-dim array
    xyz_flat = xyz_nd.reshape(flat_shape)
    X, Y, Z = (_channel(xyz_flat, n) for n in range(3))

    with np.errstate(invalid="ignore"):  # ignore divide by zero
        U_var = (4 * X) / (X + (15 * Y) + (3 * Z))
        V_var = (9 * Y) / (X + (15 * Y) + (3 * Z))
    U_var[np.isinf(U_var)] = 0  # correct divide by zero
    V_var[np.isinf(V_var)] = 0  # correct divide by zero

    L, U, V = (_channel(luv_flat, n) for n in range(3))
    L[:] = _f(Y)
    luv_flat[L == 0] = 0
    U[:] = L * 13 * (U_var - constants.REF_U)
    V[:] = L * 13 * (V_var - constants.REF_V)
    luv_flat = np.nan_to_num(luv_flat)
    return luv_flat.reshape(xyz_nd.shape)
Example #14
def husl_to_lch(husl_nd: ndarray) -> ndarray:
    flat_shape = (husl_nd.size // 3, 3)
    lch_flat = np.zeros(flat_shape, dtype=float)
    husl_flat = husl_nd.reshape(flat_shape)
    _H, S, _L = (_channel(husl_flat, n) for n in range(3))
    L, C, H = (_channel(lch_flat, n) for n in range(3))
    L[:] = _L
    H[:] = _H

    # compute max chroma for lightness and hue
    mx = _max_lh_chroma(lch_flat)
    C[:] = mx / 100.0 * S

    # handle lightness extremes
    light = L > L_MAX
    dark = L < L_MIN
    L[light] = 100
    C[light] = 0
    L[dark] = 0
    C[dark] = 0
    return lch_flat.reshape(husl_nd.shape)
Example #15
def luv_to_xyz(luv_nd: ndarray) -> ndarray:
    flat_shape = (luv_nd.size // 3, 3)
    xyz_flat = np.zeros(flat_shape, dtype=float)  # flattened xyz array
    luv_flat = luv_nd.reshape(flat_shape)
    L, U, V = (_channel(luv_flat, n) for n in range(3))
    X, Y, Z = (_channel(xyz_flat, n) for n in range(3))

    Y_var = _f_inv(L)
    L13 = 13.0 * L
    with np.errstate(divide="ignore", invalid="ignore"):  # ignore divide by zero
        U_var = U / L13 + constants.REF_U
        V_var = V / L13 + constants.REF_V
    U_var[np.isinf(U_var)] = 0  # correct divide by zero
    V_var[np.isinf(V_var)] = 0  # correct divide by zero

    Y[:] = Y_var * constants.REF_Y
    with np.errstate(invalid="ignore"):
        X[:] = -(9 * Y * U_var) / ((U_var - 4.0) * V_var - U_var * V_var)
        Z[:] = (9.0 * Y - (15.0 * V_var * Y) - (V_var * X)) / (3.0 * V_var)
    xyz_flat[L == 0] = 0
    xyz_flat = np.nan_to_num(xyz_flat)
    return xyz_flat.reshape(luv_nd.shape)
Example #16
def lch_to_husl(lch_nd: ndarray) -> ndarray:
    flat_shape = (lch_nd.size // 3, 3)
    lch_flat = lch_nd.reshape(flat_shape)
    _L, C, _H = (_channel(lch_flat, n) for n in range(3))
    hsl_flat = np.zeros(flat_shape, dtype=float)
    H, S, L = (_channel(hsl_flat, n) for n in range(3))
    H[:] = _H
    L[:] = _L

    # handle lightness extremes
    light = _L > L_MAX
    dark = _L < L_MIN
    S[light] = 0.0
    L[light] = 100.0
    S[dark] = 0.0
    L[dark] = 0.0

    # compute saturation for pixels that aren't too light or dark
    remaining = ~np.logical_or(light, dark)
    mx = _max_lh_chroma(lch_flat[remaining])
    S[remaining] = (C[remaining] / mx) * 100.0

    return hsl_flat.reshape(lch_nd.shape)
Example #17
 def forward(self, x: np.ndarray) -> np.ndarray:
     self.x_shape = x.shape
     return x.reshape(self.shape)
Example #18
 def get_Q2(self, states: np.ndarray):
     states = torch.Tensor(states.reshape(-1, self.input_dim))
     self.dqn2.train(mode=False)
     return self.dqn2(states)
Example #19
 def from_binary_segment(cls, segment_data: np.ndarray) -> 'TaborSegment':
     return cls(data=segment_data.reshape((-1, 2, 16)))
Example #20
def permute_systems(
    input_mat: np.ndarray,
    perm: List[int],
    dim=None,
    row_only: bool = False,
    inv_perm: bool = False,
) -> np.ndarray:
    r"""
    Permute subsystems within a state or operator.

    Permutes the order of the subsystems of the vector or matrix `input_mat`
    according to the permutation vector `perm`, where the dimensions of the
    subsystems are given by the vector `dim`. If `input_mat` is non-square and
    not a vector, different row and column dimensions can be specified by
    putting the row dimensions in the first row of `dim` and the columns
    dimensions in the second row of `dim`.

    If `row_only = True`, then only the rows of `input_mat` are permuted, but
    not the columns -- this is equivalent to multiplying `input_mat` on the
    left by the corresponding permutation operator, but not on the right.

    If `row_only = False`, then `dim` only needs to contain the row dimensions
    of the subsystems, even if `input_mat` is not square. If `inv_perm = True`,
    then the inverse permutation of `perm` is applied instead of `perm` itself.

    Examples
    ==========

    For spaces :math:`\mathcal{A}` and :math:`\mathcal{B}` where
    :math:`\text{dim}(\mathcal{A}) = \text{dim}(\mathcal{B}) = 2` we may
    consider an operator :math:`X \in \mathcal{A} \otimes \mathcal{B}`. Applying
    the `permute_systems` function with vector :math:`[2,1]` on :math:`X`, we
    may reorient the spaces such that
    :math:`X \in \mathcal{B} \otimes \mathcal{A}`.

    For example, if we define :math:`X \in \mathcal{A} \otimes \mathcal{B}` as

    .. math::
        X = \begin{pmatrix}
            1 & 2 & 3 & 4 \\
            5 & 6 & 7 & 8 \\
            9 & 10 & 11 & 12 \\
            13 & 14 & 15 & 16
        \end{pmatrix}

    then applying the `permute_systems` function on :math:`X` to obtain
    :math:`X \in \mathcal{B} \otimes \mathcal{A}` yields the following matrix

    .. math::
        X_{[2,1]} = \begin{pmatrix}
            1 & 3 & 2 & 4 \\
            9 & 11 & 10 & 12 \\
            5 & 7 & 6 & 8 \\
            13 & 15 & 14 & 16
        \end{pmatrix}

    >>> from toqito.perms.permute_systems import permute_systems
    >>> import numpy as np
    >>> test_input_mat = np.array(
    ...    [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
    ... )
    >>> permute_systems(test_input_mat, [2, 1])
    [[ 1  3  2  4]
     [ 9 11 10 12]
     [ 5  7  6  8]
     [13 15 14 16]]

    For spaces :math:`\mathcal{A}, \mathcal{B}`, and :math:`\mathcal{C}`
    where :math:`\text{dim}(\mathcal{A}) = \text{dim}(\mathcal{B}) =
    \text{dim}(\mathcal{C}) = 2` we may consider an operator
    :math:`X \in \mathcal{A} \otimes \mathcal{B} \otimes \mathcal{C}`. Applying
    the `permute_systems` function with vector :math:`[2,3,1]` on :math:`X`, we
    may reorient the spaces such that
    :math:`X \in \mathcal{B} \otimes \mathcal{C} \otimes \mathcal{A}`.

    For example, if we define
    :math:`X \in \mathcal{A} \otimes \mathcal{B} \otimes \mathcal{C}` as

    .. math::
        X =
        \begin{pmatrix}
            1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 \\
            9 & 10 & 11 & 12 & 13 & 14 & 15 & 16 \\
            17 & 18 & 19 & 20 & 21 & 22 & 23 & 24 \\
            25 & 26 & 27 & 28 & 29 & 30 & 31 & 32 \\
            33 & 34 & 35 & 36 & 37 & 38 & 39 & 40 \\
            41 & 42 & 43 & 44 & 45 & 46 & 47 & 48 \\
            49 & 50 & 51 & 52 & 53 & 54 & 55 & 56 \\
            57 & 58 & 59 & 60 & 61 & 62 & 63 & 64
        \end{pmatrix}

    then applying the `permute_systems` function on :math:`X` to obtain
    :math:`X \in \mathcal{B} \otimes \mathcal{C} \otimes \mathcal{A}` yields the
    following matrix

    .. math::
        X_{[2, 3, 1]} =
        \begin{pmatrix}
            1 & 5 & 2 & 6 & 3 & 7 & 4 & 8 \\
            33 & 37 & 34 & 38 & 35 & 39 & 36 & 40 \\
            9 & 13 & 10 & 14 & 11 & 15 & 12 & 16 \\
            41 & 45 & 42 & 46 & 43 & 47 & 44 & 48 \\
            17 & 21 & 18 & 22 & 19 & 23 & 20 & 24 \\
            49 & 53 & 50 & 54 & 51 & 55 & 52 & 56 \\
            25 & 29 & 26 & 30 & 27 & 31 & 28 & 32 \\
            57 & 61 & 58 & 62 & 59 & 63 & 60 & 64
        \end{pmatrix}

    >>> from toqito.perms.permute_systems import permute_systems
    >>> import numpy as np
    >>> test_input_mat = np.array(
    ...    [
    ...        [1, 2, 3, 4, 5, 6, 7, 8],
    ...        [9, 10, 11, 12, 13, 14, 15, 16],
    ...        [17, 18, 19, 20, 21, 22, 23, 24],
    ...        [25, 26, 27, 28, 29, 30, 31, 32],
    ...        [33, 34, 35, 36, 37, 38, 39, 40],
    ...        [41, 42, 43, 44, 45, 46, 47, 48],
    ...        [49, 50, 51, 52, 53, 54, 55, 56],
    ...        [57, 58, 59, 60, 61, 62, 63, 64],
    ...    ]
    ... )
    >>> permute_systems(test_input_mat, [2, 3, 1])
    [[ 1  5  2  6  3  7  4  8]
     [33 37 34 38 35 39 36 40]
     [ 9 13 10 14 11 15 12 16]
     [41 45 42 46 43 47 44 48]
     [17 21 18 22 19 23 20 24]
     [49 53 50 54 51 55 52 56]
     [25 29 26 30 27 31 28 32]
     [57 61 58 62 59 63 60 64]]

    :param input_mat: The vector or matrix.
    :param perm: A permutation vector.
    :param dim: The default has all subsystems of equal dimension.
    :param row_only: Default: `False`
    :param inv_perm: Default: `False`
    :return: The matrix or vector that has been permuted.
    """
    if len(input_mat.shape) == 1:
        input_mat_dims = (1, input_mat.shape[0])
    else:
        input_mat_dims = input_mat.shape

    is_vec = np.min(input_mat_dims) == 1
    num_sys = len(perm)

    if dim is None:
        x_tmp = input_mat_dims[0] ** (1 / num_sys) * np.ones(num_sys)
        y_tmp = input_mat_dims[1] ** (1 / num_sys) * np.ones(num_sys)
        dim = np.array([x_tmp, y_tmp])

    if is_vec:
        # 0 if column vector
        if len(input_mat.shape) > 1:
            vec_orien = 0
        # 1 if row vector
        elif len(input_mat.shape) == 1:
            vec_orien = 1
        else:
            raise ValueError(
                "InvalidMat: Length of tuple of dimensions "
                "specifying the input matrix can only be of "
                "length 1 or length 2."
            )

    if len(dim.shape) == 1:
        # Force dim to be a row vector.
        dim_tmp = dim[:].T
        if is_vec:
            dim = np.ones((2, len(dim)))
            dim[vec_orien, :] = dim_tmp
        else:
            dim = np.array([[dim_tmp], [dim_tmp]])

    prod_dim_r = int(np.prod(dim[0, :]))
    prod_dim_c = int(np.prod(dim[1, :]))

    if len(perm) != num_sys:
        raise ValueError("InvalidPerm: `len(perm)` must be equal to " "`len(dim)`.")
    if sorted(perm) != list(range(1, num_sys + 1)):
        raise ValueError("InvalidPerm: `perm` must be a permutation vector.")
    if input_mat_dims[0] != prod_dim_r or (
        not row_only and input_mat_dims[1] != prod_dim_c
    ):
        raise ValueError(
            "InvalidDim: The dimensions specified in DIM do not "
            "agree with the size of X."
        )
    if is_vec:
        permuted_mat_1 = input_mat.reshape(
            dim[vec_orien, ::-1].astype(int), order="F"
        )
        axes = num_sys - np.array(perm[::-1])
        if inv_perm:
            # The inverse of a transpose by `axes` is a transpose by
            # np.argsort(axes).
            axes = np.argsort(axes)
        permuted_mat = vec(np.transpose(permuted_mat_1, axes)).T
        # We need to flatten out the array.
        permuted_mat = functools.reduce(operator.iconcat, permuted_mat, [])
        return np.array(permuted_mat)

    vec_arg = np.array(list(range(0, input_mat_dims[0])))

    # If the dimensions are specified, ensure they are given to the
    # recursive calls as flattened lists.
    if len(dim[0][:]) == 1:
        dim = functools.reduce(operator.iconcat, dim, [])

    row_perm = permute_systems(vec_arg, perm, dim[0][:], False, inv_perm)

    # This condition is only necessary if the `input_mat` variable is sparse.
    if isinstance(input_mat, (sparse.csr_matrix, sparse.dia_matrix)):
        input_mat = input_mat.toarray()
        permuted_mat = input_mat[row_perm, :]
        permuted_mat = np.array(permuted_mat)
    else:
        permuted_mat = input_mat[row_perm, :]

    if not row_only:
        vec_arg = np.array(list(range(0, input_mat_dims[1])))
        col_perm = permute_systems(vec_arg, perm, dim[1][:], False, inv_perm)
        permuted_mat = permuted_mat[:, col_perm]

    return permuted_mat
Example #21
def reshaped(data: np.ndarray) -> np.ndarray:
    dim = dimension()
    return data.reshape(dim, dim)
Example #22
 def parameters(self, p: np.ndarray):
     """
      Update the policy parameters. Input is a 1D numpy array of size m x |A|.
     """
     self.w = p.reshape(self.w.shape)
Example #23
 def _prep_output_data(out_data: np.ndarray, time_dimension, trailing_shape: tuple):
     return np.moveaxis(out_data.reshape((out_data.shape[0], *trailing_shape)), 0, time_dimension)
Example #24
    def __init__(
        self,
        parent_points: np.ndarray,
        parent_fitness: np.ndarray,
        n_offspring: Optional[int] = None,
        initial_step_size: float = 1e-4,
        success_notion: str = "population",
        indicator: Optional[Indicator] = None,
        max_generations: Optional[int] = None,
        max_evaluations: Optional[int] = None,
        target_indicator_value: Optional[float] = None,
        parameters: Optional[MOParameters] = None,
        rng: Optional[np.random.Generator] = None,
        cov_model: str = "full",
    ) -> None:
        if len(parent_points.shape) < 2:
            parent_points = parent_points.reshape((1, len(parent_points)))
        if len(parent_fitness.shape) < 2:
            parent_fitness = parent_fitness.reshape((1, len(parent_fitness)))
        self._n_dimensions = parent_points.shape[1]
        self._n_objectives = parent_fitness.shape[1]

        if success_notion == "individual":
            self._success_notion = SuccessNotion.IndividualBased
        elif success_notion == "population":
            self._success_notion = SuccessNotion.PopulationBased
        else:
            raise ValueError("Invalid value for success_notion.")

        if cov_model == "full":
            self._cov_model = CovModel.Full
        elif cov_model == "cholesky":
            raise NotImplementedError(
                "Support for Cholesky factors has not been implemented.")
        else:
            raise ValueError("Invalid value for cov_model.")

        self._n_parents = parent_points.shape[0]
        if n_offspring is None:
            self._n_offspring = self._n_parents
        else:
            self._n_offspring = n_offspring

        if parameters is None:
            self._parameters = MOParameters(
                n_dimensions=self._n_dimensions,
                initial_step_size=initial_step_size,
            )
        else:
            self._parameters = parameters
            if self._parameters.n_dimensions != self._n_dimensions:
                raise ValueError(
                    "Invalid value for n_dimensions in provided parameters")

        self._stopping_conditions = MOStoppingConditions(
            max_generations=max_generations,
            max_evaluations=max_evaluations,
            target_indicator_value=target_indicator_value,
        )

        if rng is None:
            self._rng = np.random.default_rng()
        else:
            self._rng = rng

        if indicator is None:
            self._indicator = HypervolumeIndicator()
        else:
            self._indicator = indicator

        self._population = FixedSizePopulation(
            n_dimensions=self._n_dimensions,
            n_objectives=self._n_objectives,
            n_parents=self._n_parents,
            n_offspring=self._n_offspring,
        )
        self._population.points[:self._n_parents, :] = parent_points
        self._population.fitness[:self._n_parents, :] = parent_fitness
        self._population.penalized_fitness[:self._n_parents, :] = parent_fitness
        self._population.p_succ[:] = self._parameters.p_target_succ
        self._population.step_size[:] = self._parameters.initial_step_size

        self._parent_ranks = np.ones(self._n_parents)
        self._parent_idx = np.arange(self._n_parents)
        self._evaluation_count = 0
        self._generation_count = 0
        self._ask_called = False
Example #25
    def predict(
        self,
        observation: np.ndarray,
        state: Optional[np.ndarray] = None,
        mask: Optional[np.ndarray] = None,
        deterministic: bool = False,
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Get the policy action and state from an observation (and optional state).
        Includes sugar-coating to handle different observations (e.g. normalizing images).

        :param observation: the input observation
        :param state: The last states (can be None, used in recurrent policies)
        :param mask: The last masks (can be None, used in recurrent policies)
        :param deterministic: Whether or not to return deterministic actions.
        :return: the model's action and the next state
            (used in recurrent policies)
        """
        # TODO (GH/1): add support for RNN policies
        # if state is None:
        #     state = self.initial_state
        # if mask is None:
        #     mask = [False for _ in range(self.n_envs)]
        if isinstance(observation, dict):
            observation = ObsDictWrapper.convert_dict(observation)
        else:
            observation = np.array(observation)

        # Handle the different cases for images
        # as PyTorch use channel first format
        if is_image_space(self.observation_space):
            if not (observation.shape == self.observation_space.shape
                    or observation.shape[1:] == self.observation_space.shape):
                # Try to re-order the channels
                transpose_obs = VecTransposeImage.transpose_image(observation)
                if (transpose_obs.shape == self.observation_space.shape
                        or transpose_obs.shape[1:]
                        == self.observation_space.shape):
                    observation = transpose_obs

        vectorized_env = is_vectorized_observation(observation,
                                                   self.observation_space)

        observation = observation.reshape((-1, ) +
                                          self.observation_space.shape)

        observation = th.as_tensor(observation).to(self.device)
        with th.no_grad():
            actions = self._predict(observation, deterministic=deterministic)
        # Convert to numpy
        actions = actions.cpu().numpy()

        if isinstance(self.action_space, gym.spaces.Box):
            if self.squash_output:
                # Rescale to proper domain when using squashing
                actions = self.unscale_action(actions)
            else:
                # Actions could be on arbitrary scale, so clip the actions to avoid
                # out of bound error (e.g. if sampling from a Gaussian distribution)
                actions = np.clip(actions, self.action_space.low,
                                  self.action_space.high)

        if not vectorized_env:
            if state is not None:
                raise ValueError(
                    "Error: The environment must be vectorized when using recurrent policies."
                )
            actions = actions[0]

        return actions, state
Example #26
def pcacov(x: np.ndarray):
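    # NOTE: reshape(3, H*W) assumes a channel-first (3, H, W) layout; for an
    # (H, W, 3) image the per-pixel channel matrix would be x.reshape(-1, 3).T.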
    imcol = x.reshape(3, x.shape[0] * x.shape[1])
    ce, cv = np.linalg.eigh(np.cov(imcol))
    return ce, cv
Example #27
    def u(self, w: np.ndarray, b, x: np.ndarray, y: np.ndarray):
        new_s = np.vstack((x.reshape((-1, 1)), y))
        new_s = w.T.dot(new_s) + b
        new_s = self.u_activation(new_s[0])

        return new_s[0]
Example #28
 def get_Q(self, states: np.ndarray) -> torch.FloatTensor:
     states = torch.Tensor(states.reshape(-1, self.input_dim))
     self.dqn.train(mode=False)
     return self.dqn(states)
Example #29
 def predict_with_object(self, pic_object: np.ndarray) -> str:
     pic_object = toolbox.compress_frame(pic_object)
     pic_object = self.feature_func(pic_object)
     pic_object = pic_object.reshape(1, -1)
     return self._model.predict(pic_object)[0]
Example #30
    def expectation_from_density_matrix(self,
                                        state: np.ndarray,
                                        qubit_map: Mapping[raw_types.Qid, int],
                                        *,
                                        atol: float = 1e-7,
                                        check_preconditions: bool = True
                                       ) -> float:
        r"""Evaluate the expectation of this PauliString given a density matrix.

        Compute the expectation value of this PauliString with respect to an
        array representing a density matrix. By convention expectation values
        are defined for Hermitian operators, and so this method will fail if
        this PauliString is non-Hermitian.

        `state` must be an array representation of a density matrix and have
        shape `(2 ** n, 2 ** n)` or `(2, 2, ..., 2)` (2*n entries), where
        `state` is expressed over n qubits.

        `qubit_map` must assign an integer index to each qubit in this
        PauliString that determines which bit position of a computational basis
        state that qubit corresponds to. For example if `state` represents
        $|0\rangle |+\rangle$ and `q0, q1 = cirq.LineQubit.range(2)` then:

            cirq.X(q0).expectation(state, qubit_map={q0: 0, q1: 1}) = 0
            cirq.X(q0).expectation(state, qubit_map={q0: 1, q1: 0}) = 1

        Args:
            state: An array representing a valid density matrix.
            qubit_map: A map from all qubits used in this PauliString to the
                indices of the qubits that `state` is defined over.
            atol: Absolute numerical tolerance.
            check_preconditions: Whether to check that `state` represents a
                valid density matrix.

        Returns:
            The expectation value of the input state.

        Raises:
            NotImplementedError if this PauliString is non-Hermitian.
        """
        if abs(self.coefficient.imag) > 0.0001:
            raise NotImplementedError(
                'Cannot compute expectation value of a non-Hermitian '
                f'PauliString <{self}>. Coefficient must be real.')

        # FIXME: Avoid enforcing specific complex type. This is necessary to
        # prevent an `apply_unitary` bug (Issue #2041).
        if state.dtype.kind != 'c':
            raise TypeError("Input state dtype must be np.complex64 or "
                            "np.complex128")

        size = state.size
        num_qubits = int(np.sqrt(size)).bit_length() - 1
        dim = 1 << num_qubits
        if state.shape != (dim, dim) and state.shape != (2, 2) * num_qubits:
            raise ValueError("Input array does not represent a density matrix "
                             "with shape `(2 ** n, 2 ** n)` or `(2, ..., 2)`.")

        _validate_qubit_mapping(qubit_map, self.qubits, num_qubits)
        if check_preconditions:
            # Do not enforce reshaping if all axes of the state already have dimension 2.
            _ = qis.to_valid_density_matrix(density_matrix_rep=state.reshape(
                dim, dim),
                                            num_qubits=num_qubits,
                                            dtype=state.dtype,
                                            atol=atol)
        return self._expectation_from_density_matrix_no_validation(
            state, qubit_map)
Example #31
def nanpercentile(
    values: np.ndarray,
    q,
    axis: int,
    na_value,
    mask: np.ndarray,
    ndim: int,
    interpolation,
):
    """
    Wrapper for np.percentile that skips missing values.

    Parameters
    ----------
    values : array over which to find quantiles
    q : scalar or array of quantile indices to find
    axis : {0, 1}
    na_value : scalar
        value to return for empty or all-null values
    mask : ndarray[bool]
        locations in values that should be considered missing
    ndim : {1, 2}
    interpolation : str

    Returns
    -------
    quantiles : scalar or array
    """
    if values.dtype.kind in ["m", "M"]:
        # need to cast to integer to avoid rounding errors in numpy
        result = nanpercentile(values.view("i8"), q, axis, na_value.view("i8"),
                               mask, ndim, interpolation)

        # Note: we have to do `astype` and not `view` because in general we
        #  have float result at this point, not i8
        return result.astype(values.dtype)

    if not lib.is_scalar(mask) and mask.any():
        if ndim == 1:
            return _nanpercentile_1d(values,
                                     mask,
                                     q,
                                     na_value,
                                     interpolation=interpolation)
        else:
            # for nonconsolidatable blocks mask is 1D, but values 2D
            if mask.ndim < values.ndim:
                mask = mask.reshape(values.shape)
            if axis == 0:
                values = values.T
                mask = mask.T
            result = [
                _nanpercentile_1d(val,
                                  m,
                                  q,
                                  na_value,
                                  interpolation=interpolation)
                for (val, m) in zip(list(values), list(mask))
            ]
            result = np.array(result, dtype=values.dtype, copy=False).T
            return result
    else:
        return np.percentile(values, q, axis=axis, interpolation=interpolation)
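Per the docstring, masked locations are skipped; a minimal 1-D call
(`_nanpercentile_1d` is not shown here, so the expected result is what the
docstring promises):

vals = np.array([1.0, np.nan, 3.0])
mask = np.isnan(vals)
nanpercentile(vals, q=50, axis=0, na_value=np.nan, mask=mask, ndim=1,
              interpolation="linear")
# expected: 2.0, the median of the non-missing values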
Example #32
def matrix_vector_mul(mat: matrix, vec: ndarray):
    return dot(mat, vec.reshape(3, 1)).reshape(3, 1)
Example #33
def EnforceConnectivity(L1: np.ndarray, L2: np.ndarray, a1: np.ndarray,
                        a2: np.ndarray, b1: np.ndarray, b2: np.ndarray,
                        x1: np.ndarray, x2: np.ndarray, y1: np.ndarray,
                        y2: np.ndarray, W: np.ndarray, label: np.ndarray,
                        threshold: int, nRows: int, nCols: int):
    print("[{}] EnforceConnectivity...".format(time.ctime()[11:19]))
    print("\t[{}] [EnforceConnectivity.py] step_1/3".format(
        time.ctime()[11:19]))
    mask = np.zeros([nRows, nCols], dtype=bool)
    strayX = []  # unsigned short
    strayY = []  # unsigned short
    Size = []  # unsigned short
    xLoc = []  # unsigned short
    yLoc = []  # unsigned short
    centerL1 = []  # double
    centerL2 = []  # double
    centera1 = []  # double
    centera2 = []  # double
    centerb1 = []  # double
    centerb2 = []  # double
    centerx1 = []  # double
    centerx2 = []  # double
    centery1 = []  # double
    centery2 = []  # double
    centerW = []  # double

    sLabel = -1  # int

    for i in range(nRows):
        for j in range(nCols):
            if mask[i][j] == 0:
                sLabel += 1
                Count = 1
                centerL1.append(0)
                centerL2.append(0)
                centera1.append(0)
                centera2.append(0)
                centerb1.append(0)
                centerb2.append(0)
                centerx1.append(0)
                centerx2.append(0)
                centery1.append(0)
                centery2.append(0)
                centerW.append(0)
                strayX.append(i)
                strayY.append(j)
                Weight = W[i][j]  # double
                centerL1[sLabel] += L1[i][j] * Weight
                centerL2[sLabel] += L2[i][j] * Weight
                centera1[sLabel] += a1[i][j] * Weight
                centera2[sLabel] += a2[i][j] * Weight
                centerb1[sLabel] += b1[i][j] * Weight
                centerb2[sLabel] += b2[i][j] * Weight
                centerx1[sLabel] += x1[i][j] * Weight
                centerx2[sLabel] += x2[i][j] * Weight
                centery1[sLabel] += y1[i][j] * Weight
                centery2[sLabel] += y2[i][j] * Weight
                centerW[sLabel] += W[i][j]
                L = label[i * nCols + j]
                label[i * nCols + j] = sLabel
                mask[i][j] = 1
                xLoc.append(i)
                yLoc.append(j)
                while len(xLoc) > 0:
                    x = xLoc.pop(0)
                    y = yLoc.pop(0)
                    minX = 0 if x - 1 <= 0 else x - 1
                    maxX = nRows - 1 if x + 1 >= nRows - 1 else x + 1
                    minY = 0 if y - 1 <= 0 else y - 1
                    maxY = nCols - 1 if y + 1 >= nCols - 1 else y + 1
                    for m in range(minX, maxX + 1):
                        for n in range(minY, maxY + 1):
                            if not mask[m][n] and label[m * nCols + n] == L:
                                Count += 1
                                xLoc.append(m)
                                yLoc.append(n)
                                mask[m][n] = 1
                                label[m * nCols + n] = sLabel
                                Weight = W[m][n]
                                centerL1[sLabel] += L1[m][n] * Weight
                                centerL2[sLabel] += L2[m][n] * Weight
                                centera1[sLabel] += a1[m][n] * Weight
                                centera2[sLabel] += a2[m][n] * Weight
                                centerb1[sLabel] += b1[m][n] * Weight
                                centerb2[sLabel] += b2[m][n] * Weight
                                centerx1[sLabel] += x1[m][n] * Weight
                                centerx2[sLabel] += x2[m][n] * Weight
                                centery1[sLabel] += y1[m][n] * Weight
                                centery2[sLabel] += y2[m][n] * Weight
                                centerW[sLabel] += W[m][n]
                Size.append(Count)
                centerL1[sLabel] /= centerW[sLabel]
                centerL2[sLabel] /= centerW[sLabel]
                centera1[sLabel] /= centerW[sLabel]
                centera2[sLabel] /= centerW[sLabel]
                centerb1[sLabel] /= centerW[sLabel]
                centerb2[sLabel] /= centerW[sLabel]
                centerx1[sLabel] /= centerW[sLabel]
                centerx2[sLabel] /= centerW[sLabel]
                centery1[sLabel] /= centerW[sLabel]
                centery2[sLabel] /= centerW[sLabel]
    sLabel += 1
    Count = 0

    if TEST_LABEL_STEP_1:
        data = loadmat("test_matlab_data\\test_27_EC_label_step1.mat")
        print(
            compare_matrix.compare_2D_matrix(label.reshape([nRows, nCols]),
                                             data["tLabel"].transpose([1, 0]),
                                             1000, 0))
        exit()

    print("\t[{}] [EnforceConnectivity.py] step_2/3".format(
        time.ctime()[11:19]))
    Sarray = []  # vector<Superpixel> Sarray;
    for i in range(sLabel):
        if Size[i] < threshold:
            x = strayX[i]
            y = strayY[i]
            L = label[x * nCols + y]
            mask[x][y] = 0
            indexMark = 0
            S = Superpixel(L, Size[i])
            S.xLoc.append(x)
            S.yLoc.append(y)
            while indexMark < len(S.xLoc):
                x = S.xLoc[indexMark]
                y = S.yLoc[indexMark]
                indexMark += 1
                minX = 0 if x - 1 <= 0 else x - 1
                maxX = nRows - 1 if x + 1 >= nRows - 1 else x + 1
                minY = 0 if y - 1 <= 0 else y - 1
                maxY = nCols - 1 if y + 1 >= nCols - 1 else y + 1
                for m in range(minX, maxX + 1):
                    for n in range(minY, maxY + 1):
                        if mask[m][n] and label[m * nCols + n] == L:
                            mask[m][n] = 0
                            S.xLoc.append(m)
                            S.yLoc.append(n)
                        elif label[m * nCols + n] != L:
                            NewLabel = label[m * nCols + n]
                            if NewLabel not in S.Neighbor:
                                S.Neighbor.insert(0, NewLabel)
            Sarray.append(S)

    if TEST_LABEL_STEP_2:
        data = loadmat("test_matlab_data\\test_27_EC_label_step2.mat")
        print(
            compare_matrix.compare_2D_matrix(label.reshape([nRows, nCols]),
                                             data["tLabel"].transpose([1, 0]),
                                             1000, 0))
        exit()

    print("\t[{}] [EnforceConnectivity.py] step_3/3".format(
        time.ctime()[11:19]))
    S = 0
    while len(Sarray) > 0:
        MinDist = DBL_MAX
        Label1 = int(Sarray[S].Label)
        Label2 = -1
        for I in range(len(Sarray[S].Neighbor)):
            D = (centerL1[Label1] - centerL1[Sarray[S].Neighbor[I]]) * (centerL1[Label1] - centerL1[Sarray[S].Neighbor[I]]) + \
                (centerL2[Label1] - centerL2[Sarray[S].Neighbor[I]]) * (centerL2[Label1] - centerL2[Sarray[S].Neighbor[I]]) + \
                (centera1[Label1] - centera1[Sarray[S].Neighbor[I]]) * (centera1[Label1] - centera1[Sarray[S].Neighbor[I]]) + \
                (centera2[Label1] - centera2[Sarray[S].Neighbor[I]]) * (centera2[Label1] - centera2[Sarray[S].Neighbor[I]]) + \
                (centerb1[Label1] - centerb1[Sarray[S].Neighbor[I]]) * (centerb1[Label1] - centerb1[Sarray[S].Neighbor[I]]) + \
                (centerb2[Label1] - centerb2[Sarray[S].Neighbor[I]]) * (centerb2[Label1] - centerb2[Sarray[S].Neighbor[I]]) + \
                (centerx1[Label1] - centerx1[Sarray[S].Neighbor[I]]) * (centerx1[Label1] - centerx1[Sarray[S].Neighbor[I]]) + \
                (centerx2[Label1] - centerx2[Sarray[S].Neighbor[I]]) * (centerx2[Label1] - centerx2[Sarray[S].Neighbor[I]]) + \
                (centery1[Label1] - centery1[Sarray[S].Neighbor[I]]) * (centery1[Label1] - centery1[Sarray[S].Neighbor[I]]) + \
                (centery2[Label1] - centery2[Sarray[S].Neighbor[I]]) * (centery2[Label1] - centery2[Sarray[S].Neighbor[I]])
            if D < MinDist:  # keep the nearest neighbor by feature-space distance
                MinDist = D
                Label2 = Sarray[S].Neighbor[I]
        W1 = centerW[Label1]
        W2 = centerW[Label2]
        W = W1 + W2
        centerL1[Label2] = (W2 * centerL1[Label2] + W1 * centerL1[Label1]) / W
        centerL2[Label2] = (W2 * centerL2[Label2] + W1 * centerL2[Label1]) / W
        centera1[Label2] = (W2 * centera1[Label2] + W1 * centera1[Label1]) / W
        centera2[Label2] = (W2 * centera2[Label2] + W1 * centera2[Label1]) / W
        centerb1[Label2] = (W2 * centerb1[Label2] + W1 * centerb1[Label1]) / W
        centerb2[Label2] = (W2 * centerb2[Label2] + W1 * centerb2[Label1]) / W
        centerx1[Label2] = (W2 * centerx1[Label2] + W1 * centerx1[Label1]) / W
        centerx2[Label2] = (W2 * centerx2[Label2] + W1 * centerx2[Label1]) / W
        centery1[Label2] = (W2 * centery1[Label2] + W1 * centery1[Label1]) / W
        centery2[Label2] = (W2 * centery2[Label2] + W1 * centery2[Label1]) / W
        centerW[Label2] = W

        for i in range(len(Sarray[S].xLoc)):
            x = Sarray[S].xLoc[i]
            y = Sarray[S].yLoc[i]
            label[x * nCols + y] = Label2

        if Superpixel(Label2) in Sarray:
            Stmp = Sarray.index(Superpixel(Label2))
            Size[Label2] = Size[Label1] + Size[Label2]
            if Size[Label2] >= threshold:
                del Sarray[Stmp]
                del Sarray[S]
            else:
                Sarray[Stmp].xLoc.extend(Sarray[S].xLoc)
                Sarray[Stmp].yLoc.extend(Sarray[S].yLoc)
                Sarray[Stmp].Neighbor.extend(Sarray[S].Neighbor)
                Sarray[Stmp].Neighbor = list(set(Sarray[Stmp].Neighbor))
                Sarray[Stmp].Neighbor.sort()
                I = Sarray[Stmp].Neighbor.index(Label1)
                del Sarray[Stmp].Neighbor[I]
                I = Sarray[Stmp].Neighbor.index(Label2)
                del Sarray[Stmp].Neighbor[I]
                del Sarray[S]
        else:
            del Sarray[S]

        for i in range(len(Sarray)):
            if Label1 in Sarray[i].Neighbor and Label2 in Sarray[i].Neighbor:
                I = Sarray[i].Neighbor.index(Label1)
                del Sarray[i].Neighbor[I]
            elif Label1 in Sarray[i].Neighbor and Label2 not in Sarray[
                    i].Neighbor:
                I = Sarray[i].Neighbor.index(Label1)
                Sarray[i].Neighbor[I] = Label2
        S = 0
    return label
Example #34
 def parameters(self, p: np.ndarray):
     self._theta = p.reshape(self._theta.shape)
Example #35
def AdjustBoardPrespective(matrixboard: np.ndarray, colour):
    board = matrixboard.reshape(361)

    return (board == colour).astype(int)
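A quick illustration, assuming a 19x19 Go board whose entries are integer
stone codes (the board contents here are illustrative):

board = np.zeros((19, 19), dtype=int)
board[3, 3] = 1                    # one stone with colour code 1
AdjustBoardPrespective(board, 1)   # length-361 vector of 0s with a single 1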
Example #36
def save_raster(
    file_name: Union[str, Path],
    value_array: np.ndarray,
    crs: Union[CRS, int],
    coordinate_array: Optional[np.ndarray] = None,
    affine: Optional[Affine] = None,
    nodata: Union[None, float, int] = None,
    compress: bool = False,
) -> None:

    if len(value_array.shape) == 3:
        height, width, layers = value_array.shape
    else:
        height, width = value_array.shape
        layers = 1

        value_array = value_array.reshape(height, width, layers)

    _compress = None
    if compress:
        _compress = "lzw"

    if affine is None:
        if coordinate_array is None:
            raise ValueError("please, provide array of coordinate per pixel")
        affine = calc_affine(coordinate_array)

    if isinstance(crs, int):
        crs = CRS.from_epsg(crs)

    raster_kwargs = dict(
        driver="GTiff",
        height=height,
        width=width,
        count=layers,
        dtype=value_array.dtype,
        crs=crs,
        transform=affine,
        compress=_compress,
    )
    if nodata is not None:
        raster_kwargs["nodata"] = nodata

    with rasterio.open(file_name, "w", **raster_kwargs) as raster:
        for layer in range(layers):
            raster.write(value_array[:, :, layer], layer + 1)
    print(f"{file_name} saved")
Example #37
    def Dist_Objective(predt: np.ndarray, data: lgb.Dataset):
        """A customized objective function to train each distributional parameter using custom gradient and hessian.

        """

        target = data.get_label()

        # When num_class != 0, preds has shape (n_obs, n_dist_param).
        # Each element in a row is a raw prediction (a leaf weight that has not gone through the response function yet).
        predt = predt.reshape(-1, BCT.n_dist_param(), order="F")
        preds_location = BCT.param_dict()["location"](predt[:, 0])
        preds_scale = BCT.param_dict()["scale"](predt[:, 1])
        preds_nu = BCT.param_dict()["nu"](predt[:, 2])
        preds_tau = BCT.param_dict()["tau"](predt[:, 3])

        # Weights
        if data.get_weight() is None:
            # Use 1 as weight if no weights are specified
            weights = np.ones_like(target, dtype=float)
        else:
            weights = data.get_weight()

        # Initialize Gradient and Hessian Matrices
        grad = np.zeros(shape=(len(target), BCT.n_dist_param()))
        hess = np.zeros(shape=(len(target), BCT.n_dist_param()))

        # Location
        grad[:, 0] = BCT.gradient_location(y=target,
                                           location=preds_location,
                                           scale=preds_scale,
                                           nu=preds_nu,
                                           tau=preds_tau,
                                           weights=weights)

        hess[:, 0] = BCT.hessian_location(location=preds_location,
                                          scale=preds_scale,
                                          nu=preds_nu,
                                          tau=preds_tau,
                                          weights=weights)

        # Scale
        grad[:, 1] = BCT.gradient_scale(y=target,
                                        location=preds_location,
                                        scale=preds_scale,
                                        nu=preds_nu,
                                        tau=preds_tau,
                                        weights=weights)

        hess[:, 1] = BCT.hessian_scale(scale=preds_scale,
                                       tau=preds_tau,
                                       weights=weights)

        # Nu
        grad[:, 2] = BCT.gradient_nu(y=target,
                                     location=preds_location,
                                     scale=preds_scale,
                                     nu=preds_nu,
                                     tau=preds_tau,
                                     weights=weights)

        hess[:, 2] = BCT.hessian_nu(scale=preds_scale, weights=weights)

        # Tau
        grad[:, 3] = BCT.gradient_tau(y=target,
                                      location=preds_location,
                                      scale=preds_scale,
                                      nu=preds_nu,
                                      tau=preds_tau,
                                      weights=weights)

        hess[:, 3] = BCT.hessian_tau(tau=preds_tau, weights=weights)

        # Reshaping
        grad = grad.ravel(order="F")
        hess = hess.ravel(order="F")

        return grad, hess
Example #38
def classify(clf, feats: np.ndarray) -> int:
    return int(clf.predict(feats.reshape(1, -1)))
Example #39
 def reshape_as_input(self, array: np.ndarray, recurrent: bool):
     return array if recurrent else array.reshape(
         array.shape[0] * array.shape[1], *array.shape[2:])
Example #40
 def _transform_images(self, images: np.ndarray):
     return images.reshape(images.shape[0], images.shape[1], images.shape[2], 1)
Example #41
    def fit(
        self,
        X: np.ndarray,
        loss_fn: tf.keras.losses = elbo,
        optimizer: tf.keras.optimizers = tf.keras.optimizers.Adam(
            learning_rate=1e-3),
        cov_elbo: dict = dict(sim=.05),
        epochs: int = 20,
        batch_size: int = 64,
        verbose: bool = True,
        log_metric: Tuple[str, "tf.keras.metrics"] = None,
        callbacks: tf.keras.callbacks = None,
    ) -> None:
        """
        Train VAE model.

        Parameters
        ----------
        X
            Training batch.
        loss_fn
            Loss function used for training.
        optimizer
            Optimizer used for training.
        cov_elbo
            Dictionary with covariance matrix options in case the elbo loss function is used.
            Either use the full covariance matrix inferred from X (dict(cov_full=None)),
            only the variance (dict(cov_diag=None)) or a float representing the same standard deviation
            for each feature (e.g. dict(sim=.05)).
        epochs
            Number of training epochs.
        batch_size
            Batch size used for training.
        verbose
            Whether to print training progress.
        log_metric
            Additional metrics whose progress will be displayed if verbose equals True.
        callbacks
            Callbacks used during training.
        """
        # train arguments
        args = [self.vae, loss_fn, X]
        kwargs = {
            'optimizer': optimizer,
            'epochs': epochs,
            'batch_size': batch_size,
            'verbose': verbose,
            'log_metric': log_metric,
            'callbacks': callbacks
        }

        # initialize covariance matrix if elbo loss fn is used
        use_elbo = loss_fn.__name__ == 'elbo'
        cov_elbo_type, cov = [*cov_elbo][0], [*cov_elbo.values()][0]
        if use_elbo and cov_elbo_type in ['cov_full', 'cov_diag']:
            cov = tfp.stats.covariance(X.reshape(X.shape[0], -1))
            if cov_elbo_type == 'cov_diag':  # infer standard deviation from covariance matrix
                cov = tf.math.sqrt(tf.linalg.diag_part(cov))
        if use_elbo:
            kwargs['loss_fn_kwargs'] = {
                cov_elbo_type: tf.dtypes.cast(cov, tf.float32)
            }

        # train
        trainer(*args, **kwargs)
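A hedged usage sketch, assuming `od` is an instance of the surrounding
detector class and `X_train` is a float32 array:

od.fit(X_train,
       cov_elbo=dict(cov_diag=None),  # per-feature variances in the elbo
       epochs=10,
       batch_size=32)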
Example #42
def reconstitute_raster(
    blocks: np.ndarray,
    raster_height: int,
    raster_width: int,
    size: int,
    offset: Tuple[Number, Number],
    border_patches: bool,
    border_patches_x: bool,
    border_patches_y: bool,
) -> np.ndarray:
    """Recombines blocks into an array.
    Args:
        blocks (ndarray): A numpy array with the values to recombine. The shape
            should be (blocks, rows, columns, channels).

        raster_height (int): height in pixels of target raster.

        raster_width (int): width in pixels of target raster.

        size (int): size of patches in pixels. (square patches.)

        offset (tuple): A tuple with the offset of the blocks (x, y)

        border_patches (bool): Do the patches contain border patches?

        border_patches_x (bool): Do the patches contain border patches on the x axis?

        border_patches_y (bool): Do the patches contain border patches on the y axis?

    Returns:
        A reconstituted raster.
    """
    type_check(blocks, [np.ndarray], "blocks")
    type_check(raster_height, [int], "raster_height")
    type_check(raster_width, [int], "raster_width")
    type_check(size, [int], "size")
    type_check(offset, [tuple], "offset")
    type_check(border_patches, [bool], "border_patches")
    type_check(border_patches_x, [bool], "border_patches_x")
    type_check(border_patches_y, [bool], "border_patches_y")

    ref_shape = [raster_height - offset[1], raster_width - offset[0]]

    if offset != (0, 0):
        border_patches = False
        border_patches_x = False
        border_patches_y = False

    if border_patches and (border_patches_x or border_patches_y):
        if border_patches_x:
            ref_shape[1] = ((ref_shape[1] // size) * size) + size
        if border_patches_y:
            ref_shape[0] = ((ref_shape[0] // size) * size) + size

    # (vertical blocks, horizontal blocks, patch rows, patch columns, channels)
    reshape = blocks.reshape(
        ref_shape[0] // size,
        ref_shape[1] // size,
        size,
        size,
        blocks.shape[3],
    )

    swap = reshape.swapaxes(1, 2)

    destination = swap.reshape((ref_shape[0] // size) * size,
                               (ref_shape[1] // size) * size, blocks.shape[3])

    # Order: Y, X, Z
    if border_patches and (border_patches_x or border_patches_y):

        x_offset = 0
        y_offset = 0

        if border_patches_x:
            x_offset = int(ref_shape[1] - raster_width)
            x_edge = destination[:raster_height, -(size - x_offset):, :]
            destination[:raster_height, -size:-x_offset, :] = x_edge

        if border_patches_y:
            y_offset = int(ref_shape[0] - raster_height)
            y_edge = destination[-(size - y_offset):, :raster_width, :]
            destination[-size:-y_offset, :raster_width, :] = y_edge

        if border_patches_x and border_patches_y:
            corner = destination[-(size - y_offset):, -(size - x_offset):, :]
            destination[-size:-y_offset, -size:-x_offset, :] = corner

        destination = destination[:raster_height, :raster_width,
                                  0:blocks.shape[3]]

    return destination
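# Usage sketch under assumed inputs: a 100x100 single-band raster cut into
# 10x10 patches, no offset and no border patches.
patches = np.zeros((100, 10, 10, 1))  # (blocks, rows, columns, channels)
raster = reconstitute_raster(
    patches, raster_height=100, raster_width=100, size=10,
    offset=(0, 0), border_patches=False,
    border_patches_x=False, border_patches_y=False,
)
assert raster.shape == (100, 100, 1)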
Exemplo n.º 43
0
def flatten_mnist_input(input: np.ndarray) -> np.ndarray:
    return input.reshape(28**2)
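# Usage sketch: flatten one 28x28 MNIST digit to a 784-element vector.
digit = np.zeros((28, 28))
assert flatten_mnist_input(digit).shape == (28 ** 2,)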
Exemplo n.º 44
0
    def predict(self, image: np.ndarray):
        height, width, num_channel = image.shape
        assert num_channel == 3  # only BGR, no alpha
        probs = self.model.predict(image.reshape(-1, 3))  # type: np.ndarray
        return probs.reshape((height, width))
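# Usage sketch with a hypothetical instance: `segmenter` stands in for the
# class owning predict(); the model maps each BGR pixel to a probability,
# reshaped back to the image's height and width.
#
# frame = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
# probs = segmenter.predict(frame)  # probs.shape == (480, 640)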
Exemplo n.º 45
0
def uncertainty_plot(y: np.ndarray, y_hat: np.ndarray, title: str = '') -> Tuple[mpl.figure.Figure, np.ndarray]:
    """Plots probability plot alongside a hydrograph with simulation percentiles.
    
    The probability plot itself is analogous to the calibration plot for classification tasks. The plot compares the 
    theoretical percentiles of the estimated conditional distributions (over time) with the respective relative 
    empirical counts. 
    The probability plot is often also referred to as probability integral transform diagram, Q-Q plot, or predictive 
    Q-Q plot. 
    

    Parameters
    ----------
    y : np.ndarray
        Array of observed values.
    y_hat : np.ndarray
        Array of simulated values.
    title : str, optional
        Title of the plot, by default empty.

    Returns
    -------
    Tuple[mpl.figure.Figure, np.ndarray]
        The figure and the array of its two axes.
    """

    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(6.5, 3), gridspec_kw={'width_ratios': [4, 5]})

    # only take part of y to have a better zoom-in
    y_long = y.flatten()
    y_hat_long = y_hat.reshape(y_long.shape[0], -1)
    x_bnd = np.arange(0, 400)
    y_bnd_len = len(x_bnd)

    # hydrograph:
    y_r = [0.0] * 5  # fraction of observations inside each prediction interval
    quantiles = [0.9, 0.80, 0.50, 0.20, 0.1]
    labels_and_colors = {
        'labels': ['05-95 PI', '10-90 PI', '25-75 PI', '40-60 PI', '45-55 PI'],
        'colors': ['#FDE725', '#8FD744', '#21908C', '#31688E', '#443A83']
    }
    for idx in range(len(quantiles)):
        lb = round(50 - (quantiles[idx] * 100) / 2)
        ub = round(50 + (quantiles[idx] * 100) / 2)
        y_lb = np.percentile(y_hat_long[x_bnd, :], lb, axis=-1).flatten()
        y_ub = np.percentile(y_hat_long[x_bnd, :], ub, axis=-1).flatten()
        y_r[idx] = np.sum(((y_long[x_bnd] > y_lb) * (y_long[x_bnd] < y_ub))) / y_bnd_len
        if idx <= 3:
            axs[1].fill_between(x_bnd,
                                y_lb,
                                y_ub,
                                color=labels_and_colors['colors'][idx],
                                label=labels_and_colors['labels'][idx])

    y_median = np.median(y_hat_long, axis=-1).flatten()
    axs[1].plot(x_bnd, y_median[x_bnd], '-', color='red', label="median")
    axs[1].plot(x_bnd, y_long[x_bnd], '--', color='black', label="observed")
    axs[1].legend(prop={'size': 5})
    axs[1].set_ylabel("runoff")
    axs[1].set_xlabel("time index")
    # probability-plot:
    quantiles = np.arange(0, 101, 5)
    y_r = quantiles * 0.0
    for idx in range(len(y_r)):
        ub = quantiles[idx]
        y_ub = np.percentile(y_hat_long[x_bnd, :], ub, axis=-1).flatten()
        y_r[idx] = np.sum(y_long[x_bnd] < y_ub) / y_bnd_len

    axs[0].plot([0, 1], [0, 1], 'k--')
    axs[0].plot(quantiles / 100, y_r, 'ro', ms=3.0)
    axs[0].set_axisbelow(True)
    axs[0].yaxis.grid(color='#ECECEC', linestyle='dashed')
    axs[0].xaxis.grid(color='#ECECEC', linestyle='dashed')
    axs[0].xaxis.set_ticks(np.arange(0, 1, 0.2))
    axs[0].yaxis.set_ticks(np.arange(0, 1, 0.2))
    axs[0].set_xlabel("theoretical quantile frequency")
    axs[0].set_ylabel("empirical quantile frequency")

    fig.suptitle(title, fontsize=14)
    fig.tight_layout(rect=[0, 0.1, 1, 0.95])

    return fig, axs
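# Usage sketch with synthetic data: 500 observed values and 100 simulated
# realisations per time step. Note the hydrograph zooms into the first 400
# time steps, so y must cover at least that range.
y = np.random.rand(500)
y_hat = np.random.rand(500, 100)
fig, axs = uncertainty_plot(y, y_hat, title="synthetic example")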
Exemplo n.º 46
0
    def tell(
        self,
        input_fitness: np.ndarray,
        input_penalized_fitness: Optional[np.ndarray] = None,
        evaluation_count: Optional[int] = None,
    ) -> None:
        """
        Pass fitness information to the optimizer.

        Parameters
        ----------
        input_fitness
            The fitness of the search points.
        input_penalized_fitness: optional
            The penalized fitness of the search points. \
            Use case: constrained functions.
        evaluation_count: optional
            Total evaluation count. Use case: noisy functions.

        Raises
        ------
        RuntimeError
            When `tell` is called before `ask`.

        Notes
        -----
        Assumes stored offspring data (i.e. covariance matrices) corresponds to
        the search points produced by the last call to `ask`.
        """
        if not self._ask_called:
            raise RuntimeError("Tell called before ask")
        # Convenience local variables to improve legibility.
        # We create views of the arrays, for example, when we update p_succ[i]
        # we actually update self._population.p_succ[i]
        n_parents = self._n_parents
        n_offspring = self._n_offspring
        points = self._population.points[:]
        fitness = self._population.fitness[:]
        penalized_fitness = self._population.penalized_fitness[:]
        p_succ = self._population.p_succ[:]
        step_size = self._population.step_size[:]
        path = self._population.path[:]
        cov = self._population.cov[:]
        parents = self._population.parents[:]

        # Update using input data
        # If the input array has only one dimension, it is a single point
        # with shape (n_objectives,) so we reshape it to have shape
        # (1, n_objectives)
        if len(input_fitness.shape) == 1:
            input_fitness = input_fitness.reshape(1, len(input_fitness))
        fitness[n_parents:] = input_fitness

        if input_penalized_fitness is None:
            penalized_fitness[n_parents:] = input_fitness
        else:
            if len(input_penalized_fitness.shape) == 1:
                input_penalized_fitness = input_penalized_fitness.reshape(
                    1, len(input_penalized_fitness))
            penalized_fitness[n_parents:] = input_penalized_fitness

        if evaluation_count is None:
            self._evaluation_count += len(input_fitness)
        else:
            self._evaluation_count = evaluation_count

        selected, ranks = indicator_selection(self._indicator,
                                              penalized_fitness, n_parents)

        # Perform adaptation
        # [2007:mo-cma-es] Algorithm 4, lines 8-10
        old_step_size = step_size.copy()
        for oidx in range(n_parents, n_parents + n_offspring):
            pidx = parents[oidx]
            # Updates should only occur if individuals are selected and
            # successful (see [2008:shark], URL: https://git.io/Jty6G).
            offspring_is_successful = 0.0
            if self._success_notion == SuccessNotion.IndividualBased:
                # [2010:mo-cma-es] Section 3.1, p. 489
                if selected[oidx] and ranks[oidx] <= ranks[pidx]:
                    offspring_is_successful = 1.0
                    self._update_step_size(oidx, offspring_is_successful)
                    x_step = (points[oidx] -
                              points[pidx]) / old_step_size[pidx]
                    self._update_covariance_matrix(oidx, x_step)
            elif self._success_notion == SuccessNotion.PopulationBased:
                # [2010:mo-cma-es] Section 3.2, p. 489
                if selected[oidx]:
                    offspring_is_successful = 1.0
                    self._update_step_size(oidx, offspring_is_successful)
                    x_step = (points[oidx] -
                              points[pidx]) / old_step_size[pidx]
                    self._update_covariance_matrix(oidx, x_step)
            if selected[pidx]:
                self._update_step_size(pidx, offspring_is_successful)

        # Complete the selection process
        # [2007:mo-cma-es] Algorithm 4, lines 11-13
        points[:n_parents] = points[selected]
        fitness[:n_parents] = fitness[selected]
        penalized_fitness[:n_parents] = penalized_fitness[selected]
        step_size[:n_parents] = step_size[selected]
        cov[:n_parents] = cov[selected]
        path[:n_parents] = path[selected]
        p_succ[:n_parents] = p_succ[selected]
        self._parent_ranks[:] = ranks[selected]

        self._generation_count += 1
        self._ask_called = False
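# A minimal ask/tell loop, assuming `opt` is an instance of this optimizer
# and `evaluate` returns an (n_offspring, n_objectives) fitness array; both
# names are hypothetical stand-ins:
#
# for _ in range(100):
#     points = opt.ask()           # propose new search points
#     fitness = evaluate(points)   # user-supplied objective evaluation
#     opt.tell(fitness)            # feed results back for adaptation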
Exemplo n.º 47
0
def interpolate_2d(
    values: np.ndarray,
    method: str = "pad",
    axis: Axis = 0,
    limit: int | None = None,
    limit_area: str | None = None,
) -> None:
    """
    Perform an actual interpolation of values. Values are made 2-d if
    needed and filled in place; the function returns None.

    Parameters
    ----------
    values: np.ndarray
        Input array.
    method: str, default "pad"
        Interpolation method. Can be "bfill" or "pad".
    axis: 0 or 1
        Interpolation axis.
    limit: int, optional
        Index limit on interpolation.
    limit_area: str, optional
        Limit area for interpolation. Can be "inside" or "outside".

    Notes
    -----
    Modifies values in-place.
    """
    if limit_area is not None:
        np.apply_along_axis(
            # error: Argument 1 to "apply_along_axis" has incompatible type
            # "partial[None]"; expected
            # "Callable[..., Union[_SupportsArray[dtype[<nothing>]],
            # Sequence[_SupportsArray[dtype[<nothing>]]],
            # Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]],
            # Sequence[Sequence[Sequence[_SupportsArray[dtype[<nothing>]]]]],
            # Sequence[Sequence[Sequence[Sequence[_
            # SupportsArray[dtype[<nothing>]]]]]]]]"
            partial(  # type: ignore[arg-type]
                _interpolate_with_limit_area,
                method=method,
                limit=limit,
                limit_area=limit_area,
            ),
            # error: Argument 2 to "apply_along_axis" has incompatible type
            # "Union[str, int]"; expected "SupportsIndex"
            axis,  # type: ignore[arg-type]
            values,
        )
        return

    transf = (lambda x: x) if axis == 0 else (lambda x: x.T)

    # reshape a 1 dim if needed
    if values.ndim == 1:
        if axis != 0:  # pragma: no cover
            raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
        values = values.reshape(tuple((1,) + values.shape))

    method = clean_fill_method(method)
    tvalues = transf(values)

    # _pad_2d and _backfill_2d both modify tvalues inplace
    if method == "pad":
        _pad_2d(tvalues, limit=limit)
    else:
        _backfill_2d(tvalues, limit=limit)

    return
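# Usage sketch, assuming the pandas internals this function relies on
# (clean_fill_method, _pad_2d, _backfill_2d) are importable. The 1-d input is
# reshaped to a (1, n) view sharing memory, so the forward fill done on the
# view is visible through the original array.
arr = np.array([1.0, np.nan, np.nan, 4.0])
interpolate_2d(arr, method="pad")
# arr is now array([1., 1., 1., 4.])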
Exemplo n.º 48
0
def vectorize(vec: ndarray) -> ndarray:
    """
    Return a ndarray of shape (3,1)
    """
    return vec.reshape(3, 1)
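# Usage sketch: turn a flat 3-vector into a column vector for matrix algebra.
v = np.array([1.0, 2.0, 3.0])
col = vectorize(v)
assert col.shape == (3, 1)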