Example 1
def calculate_potentials(
    r: np.ndarray, potential_law=lenard_jones_potential, out=None, *args, **kwargs
):
    """

    Parameters
    ----------
    r :
        Nx3 array of particle positions
    args :
    kwargs :
        passed along to the force law

    Notes
    -----
    1. get a NxNx3 antisymmetric (upper triangular) matrix of vector distances
    2a. from 1 get a normalized NxNx3 antisymmetric (matrix of direction vectors
    2b. from 1 get a NxN (upper triangular due to symmetry) matrix of scalar distances
    3b. get a NxN matrix of force magnitudes (reshapable to
    3. multiply 2a by 3b to get forces
    4. update existing force matrix

    Returns
    -------

    """
    # TODO optimize with upper triangular matrix
    N = r.shape[0]
    rij = r.reshape(N, 1, 3) - r.reshape(1, N, 3)
    distances_ij = np.sqrt(np.sum(rij ** 2, axis=2, keepdims=True))
    distances_ij[np.arange(N), np.arange(N), :] = np.inf
    potentials = potential_law(distances_ij, *args, **kwargs)
    return potentials.sum() / 2
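For reference, a minimal sketch of a compatible potential law; the real lenard_jones_potential is defined elsewhere in this codebase, and the epsilon/sigma defaults here are illustrative assumptions, not values from the source:

import numpy as np

# Hypothetical stand-in for the lenard_jones_potential referenced above.
# epsilon and sigma are illustrative defaults.
def lenard_jones_potential(r: np.ndarray, epsilon: float = 1.0, sigma: float = 1.0) -> np.ndarray:
    sr6 = (sigma / r) ** 6
    return 4.0 * epsilon * (sr6 ** 2 - sr6)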
Example 2
def get_statistics(matrix: np.ndarray, masktotal: Union[np.ndarray, int, None],
                   mask: Union[np.ndarray, int, None] = None) -> Dict:
    """Calculate different statistics of a detector image, such as sum, max,
    center of gravity, etc."""
    assert isinstance(matrix, np.ndarray)
    if mask is None:
        mask = 1
    if masktotal is None:
        masktotal = 1
    assert isinstance(masktotal, (np.ndarray, int))
    assert isinstance(mask, (np.ndarray, int))
    result = {}
    matrixorig = matrix
    for prefix, msk in [('total_', masktotal), ('', mask)]:
        matrix = matrixorig * msk
        x = np.arange(matrix.shape[0])
        y = np.arange(matrix.shape[1])
        result[prefix + 'sum'] = matrix.sum()
        result[prefix + 'max'] = matrix.max()
        result[prefix + 'beamx'] = (matrix * x[:, np.newaxis]).sum() / result[prefix + 'sum']
        result[prefix + 'beamy'] = (matrix * y[np.newaxis, :]).sum() / result[prefix + 'sum']
        result[prefix + 'sigmax'] = (
                                        (matrix * (x[:, np.newaxis] - result[prefix + 'beamx']) ** 2).sum() /
                                        result[prefix + 'sum']) ** 0.5
        result[prefix + 'sigmay'] = (
                                        (matrix * (y[np.newaxis, :] - result[prefix + 'beamy']) ** 2).sum() /
                                        result[prefix + 'sum']) ** 0.5
        result[prefix + 'sigma'] = (result[prefix + 'sigmax'] ** 2 + result[prefix + 'sigmay'] ** 2) ** 0.5
    return result
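A quick usage sketch with illustrative values (a 5x5 image with one bright pixel and no masks):

import numpy as np

img = np.zeros((5, 5))
img[2, 3] = 10.0
stats = get_statistics(img, masktotal=None)
print(stats['beamx'], stats['beamy'])  # -> 2.0 3.0 (the center of gravity)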
Example 3
def std_dev_contrast_stretch(arr: np.ndarray, n=2):
    """Perform a contrast stretch mapping the range mean ± nσ (default n=2)
    to [-1, 1].
    """
    sigma = arr.std()*n
    m = arr.mean()
    return np.interp(arr,[m-sigma,m+sigma],[-1,1])
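Usage sketch (illustrative values): values within mean ± 2σ are mapped linearly into [-1, 1], and anything outside that band is clipped by np.interp:

import numpy as np

rng = np.random.default_rng(0)
img = rng.normal(loc=5.0, scale=2.0, size=(4, 4))
stretched = std_dev_contrast_stretch(img)
assert stretched.min() >= -1 and stretched.max() <= 1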
Example 4
def stretch(array: np.ndarray, min: int=0, max: int=1, fill_dtype=None) -> np.ndarray:
    """'Stretch' the profile to fit a new min and max value and interpolate in between.
    From: http://www.labri.fr/perso/nrougier/teaching/numpy.100/  exercise #17

    Parameters
    ----------
    array: numpy.ndarray
        The numpy array to stretch.
    min : number
        The new minimum of the values.
    max : number
        The new maximum value.
    fill_dtype : numpy data type
        If None (default), the array will be stretched to the passed min and max.
        If a numpy data type (e.g. np.int16), the array will be stretched to fit the full range of values
        of that data type. If a value is given for this parameter, it overrides ``min`` and ``max``.
    """
    new_max = max
    new_min = min
    if fill_dtype is not None:
        try:
            di = np.iinfo(fill_dtype)
        except ValueError:
            di = np.finfo(fill_dtype)
        new_max = di.max
        new_min = di.min
    # normalize the array to 0..1
    stretched_array = (array - array.min())/(array.max() - array.min())
    # rescale the normalized array to the new min/max range
    stretched_array *= (new_max - new_min)
    stretched_array += new_min
    return stretched_array.astype(fill_dtype if fill_dtype is not None else array.dtype)
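Usage sketch (illustrative values):

import numpy as np

profile = np.array([0.0, 0.5, 1.0])
print(stretch(profile, min=0, max=10))        # -> [ 0.  5. 10.]
print(stretch(profile, fill_dtype=np.uint8))  # -> [  0 127 255]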
Example 5
def create_binary_confusion_matrix(
    truth_binary_values: np.ndarray, prediction_binary_values: np.ndarray, name=None
) -> pd.Series:
    # This implementation is:
    # ~30x faster than sklearn.metrics.confusion_matrix
    # ~25x faster than sklearn.metrics.confusion_matrix(labels=[False, True])
    # ~6x faster than pandas.crosstab
    truth_binary_values = truth_binary_values.ravel()
    prediction_binary_values = prediction_binary_values.ravel()

    truth_binary_negative_values = 1 - truth_binary_values
    test_binary_negative_values = 1 - prediction_binary_values

    true_positive = np.sum(np.logical_and(truth_binary_values, prediction_binary_values))
    true_negative = np.sum(
        np.logical_and(truth_binary_negative_values, test_binary_negative_values)
    )
    false_positive = np.sum(np.logical_and(truth_binary_negative_values, prediction_binary_values))
    false_negative = np.sum(np.logical_and(truth_binary_values, test_binary_negative_values))

    # Storing the matrix as a Series instead of a DataFrame makes it easier to reference elements
    # and aggregate multiple matrices
    cm = pd.Series(
        {'TP': true_positive, 'TN': true_negative, 'FP': false_positive, 'FN': false_negative},
        name=name,
    )

    return cm
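Usage sketch (illustrative values; pandas is assumed imported as pd, as in the function above):

import numpy as np

truth = np.array([1, 1, 0, 0])
pred = np.array([1, 0, 1, 0])
print(create_binary_confusion_matrix(truth, pred))  # TP=1, TN=1, FP=1, FN=1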
Example 6
def calc_metric(candidate_img_blurred: np.ndarray, candidate_kpts: np.ndarray, candidate_length: int,
                url_img_blurred: np.ndarray, url_kpts: np.ndarray, url_length: int, kpt_pos: tuple) -> float:
    candidate_kpts, url_kpts = serial.deserialize_keypoints(candidate_kpts), serial.deserialize_keypoints(url_kpts)
    m, n = kpt_pos[0], kpt_pos[1]
    if m == -1:
        return 1
    candidate_img_blurred, url_img_blurred = image.adjust_size(candidate_img_blurred, url_img_blurred)
    c_x, c_y = candidate_img_blurred.shape
    u_x, u_y = url_img_blurred.shape
    x, y = np.array(candidate_kpts[n].pt) - np.array(url_kpts[m].pt)
    if x + c_x < 0 or x + u_x < 0:
        return 1
    if y + c_y < 0 or y + u_y < 0:
        return 1
    candidate_img_blurred = image.align_text(candidate_img_blurred, (int(x), int(y)))
    candidate_img_blurred, url_img_blurred = candidate_img_blurred.astype(int), url_img_blurred.astype(int)
    img_diff = np.abs(candidate_img_blurred - url_img_blurred)
    divisor = max(candidate_img_blurred.size, url_img_blurred.size)
    diff = len(np.where(img_diff > 10)[0]) / float(divisor)
    # penalise large differences in text length between the two images
    penalty = abs(float(candidate_length) - url_length) / max(candidate_length, url_length)
    diff = diff / (1.0 - penalty * 10)
    return abs(diff)
       
Example 7
def _calculate_gumbel_poly(lx: np.ndarray, alpha: float, d: int, method: str, log: bool):
    """Inner function that does the actual Gumbel polynomial calculation"""
    k = np.arange(d) + 1

    if method == 'pois':
        n = len(lx)
        x = np.exp(lx)  # n x 1 vector

        lppois = np.array([poisson.logcdf(d - k, xx) for xx in x]).T  # d x n matrix
        llx = k.reshape(-1, 1) @ lx.reshape(1, -1)  # d x n matrix
        labs_poch = np.array([np.sum(np.log(np.abs(alpha * j - (k - 1)))) for j in k])
        lfac = gammaln(k + 1)  # d x 1 vector

        lxabs = llx + lppois + np.tile(labs_poch - lfac, (n, 1)).T + np.tile(x, (d, 1))

        signs = sign_ff(alpha, k, d)
        offset = np.max(lxabs, 0)
        sum_ = np.sum(signs[:, None] * np.exp(lxabs - offset[None, :]), 0)
        res = np.log(sum_) + offset

        return res if log else np.exp(res)
    elif method in ('direct', 'log', 'sort'):
        log_a_dk = gumbel_coef(d, alpha, method, True)

        log_x = log_a_dk[:, None] + k.reshape(-1, 1) @ lx.reshape(1, -1)
        x = np.exp(log_x).sum(0)
        return np.log(x) if log else x
    else:
        raise ValueError(f"Unknown <method>: {method}. Use one of pois, direct, log, sort")
Example 8
    def __call__(self, data: np.ndarray, learning_rate: float = 1.0,
                 steps: int = 1000, db: bool = True) -> List[float]:
        """ `Learn` the parameters of best fit for the given data and model """

        _min = data.min()
        _max = data.max()

        # scale amplitude to [0, 1]
        self.data = (data - _min) / (_max - _min)

        self.cubeX, self.cubeY = data.shape
        self.learning_rate = learning_rate
        self.steps = steps

        # perform the fit
        result = self.simplefit()

        # unscale amplitude of resultant
        result[0] = result[0] * (_max - _min) + _min

        result_as_list = result.tolist()

        self._counter += 1

        return result_as_list
Example 9
    def compute_statistics(self, sample: np.ndarray) -> Tuple:
        """
        Computes mean and variance of a sample

        :param sample: A sample to compute statistics for.
        :return: A tuple (mean, variance).
        """
        return sample.mean(), sample.var()
Example 10
def masks(mask: np.ndarray) -> Sequence[np.ndarray]:
    masks = [mask]
    mask2 = mask.copy()
    mask2[0, 0, 0] = 1
    masks.append(mask2)
    mask3 = mask.copy()
    mask3[2, 2, 2] = 0
    masks.append(mask3)
    return masks
Example 11
def convert_data_to_format(data: np.ndarray, filename: str):
    if filename.endswith(".wav"):
        return (data.view(np.float32) * 32767).astype(np.int16)
    elif filename.endswith(".complex16u") or filename.endswith(".cu8"):
        return (127.5 * (data.view(np.float32) + 1.0)).astype(np.uint8)
    elif filename.endswith(".complex16s") or filename.endswith(".cs8"):
        return (data.view(np.float32) * 127.5 - 0.5).astype(np.int8)
    else:
        return data
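Scaling sketch for the .cu8 branch: 127.5 * (x + 1.0) maps the float range [-1.0, 1.0] onto [0.0, 255.0] before the cast to uint8 (illustrative values):

import numpy as np

demo = np.array([-1.0, 0.0, 1.0], dtype=np.float32)
print((127.5 * (demo + 1.0)).astype(np.uint8))  # -> [  0 127 255]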
Example 12
 def norm_image(self, arr: np.ndarray):
     """
     Normalize a numpy array to the range 0-255 and convert it to np.uint8.
     :param arr: the numpy array to process
     :return: a uint8 array with values in the range 0-255
     """
     if arr.min() != arr.max():
         arr = (arr - arr.min()) / (arr.max() - arr.min()) * 255
     return np.array(arr, dtype=np.uint8)
Example 13
def logsumexp_double_complement(a: np.ndarray, rel_tol: float = 1e-3) -> float:
    """Calculates the following expression in a numerically stable fashion:

        log(1 - (1 - exp(a_0)) x (1 - exp(a_1)) x ...)

    where a_i are the entries of `a` and assumed to be non-positive. The algorithm is as follows:

    We define:

        exp(x_n) = 1 - \prod_{i=0}^n (1 - exp(a_i)),

    Thus, we have x_0 = a_0 and the recursion relation:

        exp(x_{n+1}) = exp(x_n) + exp(b_{n+1}),

    where

        b_{n+1} = a_{n+1} + log(1 - exp(x_n)).

    We sort `a` in the descending order and update `x` term by term. It is easy to show that x_{n} is monotonically
    increasing and that |x_{N} - x_{n}| < (N - n) |x_{n} - x_{n-1}|. We use the last inequality to bound the error
    for early stopping.

    Args:
        a: a float array
        rel_tol: relative error tolerance for early stopping of calculation

    Returns:
        a float scalar
    """
    try:
        assert isinstance(a, np.ndarray)
        a = np.asarray(a.copy(), dtype=float)
    except AssertionError:
        try:
            a = np.asarray(a, dtype=float)
        except ValueError:
            raise ValueError("The input argument must be castable to a float ndarray.")
    assert len(a) > 0
    assert 0. <= rel_tol < 1.0

    # enforce all entries of a to be negative or zero
    a[a > 0.] = 0.

    if len(a) == 1:
        return a.item()
    else:
        a = np.sort(a.flatten())[::-1]
        x = a[0]
        sz = len(a)
        for i, entry in enumerate(a[1:]):
            x_new = np.logaddexp(x, entry + logp_complement(x))
            if np.abs(x_new - x) * (sz - i - 1) < rel_tol * np.abs(x):
                return x_new
            else:
                x = x_new
        return x
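The helper logp_complement used above is assumed to compute log(1 - exp(x)) for x <= 0 in a numerically stable way; a minimal sketch of such a helper (the standard "log1mexp" trick):

import numpy as np

# Minimal sketch of the assumed logp_complement helper:
# for x near 0 use log(-expm1(x)), otherwise log1p(-exp(x)).
def logp_complement(x: float) -> float:
    if x > -np.log(2.0):
        return np.log(-np.expm1(x))
    return np.log1p(-np.exp(x))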
Example 14
def save_pfm_texture(filename: str, tex: np.ndarray):
    if tex.dtype != np.float32:
        print('Input is not 32 bit precision: converting to 32 bits.')
        tex = tex.astype(np.float32)
    height, width = tex.shape[0], tex.shape[1]
    with open(filename, 'wb+') as f:
        f.write('{}\n'.format(HEADER_MAGIC).encode())
        f.write('{} {}\n'.format(width, height).encode())
        f.write('-1.0\n'.encode())
        f.write(tex.tobytes())
Example 15
def ink(x: np.ndarray, y: np.ndarray, degree: int, a: int = -3) -> np.ndarray:
    assert _is_integer(degree) and degree > 0, "Degree must be positive integer"
    assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray), "X and Y must be numpy arrays"
    if len(x.shape) == 1:
        x = x.reshape(1, x.shape[0])
    if len(y.shape) == 1:
        y = y.reshape(1, y.shape[0])
    x[x < a] = a
    y[y < a] = a
    return K(x, y, degree, a) / np.sqrt(K_norm(x, y, degree, a, "x") * K_norm(x, y, degree, a, "y"))
Example 16
def new_ink(X: np.ndarray, Y: np.ndarray, degree: int, a: int = -3) -> np.ndarray:
    assert _is_integer(degree) and degree > 0, "Degree must be positive integer"
    assert isinstance(X, np.ndarray) and isinstance(Y, np.ndarray), "X and Y must be numpy arrays"
    if len(X.shape) == 1:
        X = X.reshape(1, X.shape[0])
    if len(Y.shape) == 1:
        Y = Y.reshape(1, Y.shape[0])
    X[X < a] = a
    Y[Y < a] = a
    return new_K(X, Y, degree, a) / new_K_norm(X, Y, degree, a)
Example 17
def c_int_ext(k: int, aff: np.ndarray, adj_mat: np.ndarray, threads_nb=1) -> Tuple[float, float]:
    """
    This function calculates the inter/intra-cluster density
    as defined in Santo Fortunato, Community Detection in Graphs, Physics Reports, 486, 75-174(2010)
    Parameters
    ----------
    k : int
        The number of clusters
    aff : np.ndarray
        A 1-D array containing the assignment of nodes to their clusters
    adj_mat : np.ndarray
        Adjacency matrix
    threads_nb : int
        The number of threads to use (default 1)
    Returns
    -------
    float, float
        The values of sum(sigma_int)/k and sum(sigma_ext)/k,
        which measure the quality of the clustering.
    global int_sigmas
    global ext_sigmas
    # initialize to zeros
    int_sigmas = np.zeros(k)
    ext_sigmas = np.zeros(k)
    # Get the number of nodes
    n = len(aff)
    # Calculates the internal and external edges
    # for each cluster
    threads = []  # type: list[Thread]
    # if threads number is too large then update it
    if n / 10 < threads_nb:
        threads_nb = int(n / 10)
    # create threads instances
    for i in range(threads_nb):
        from_i = int(i * (n / threads_nb))
        to_i = int((i + 1) * (n / threads_nb))
        t = Thread(target=calculate_int_ext_edges, args=(adj_mat, aff, from_i, to_i))
        threads.append(t)
        threads[i].start()
    # Wait for threads to finish
    for t in threads:
        t.join()
    # Transform aff from np.ndarray to list
    # to be able to use the count function
    aff = aff.tolist() # type: list
    # Calculates the density for each cluster
    for i in range(k):
        nb_c_i = aff.count(i)
        if nb_c_i <= 1:
            int_sigmas[i] = 0
            ext_sigmas[i] = 0
        else:
            int_sigmas[i] /= (nb_c_i * (nb_c_i - 1) / 2)
            ext_sigmas[i] /= (nb_c_i * (n - nb_c_i))
    # Return the density for all the clusters
    return sum(int_sigmas) / k, sum(ext_sigmas) / k
Example 18
    def __init__(self, train_x: np.ndarray, train_y: np.ndarray, features_name=None, do_standardization=True):
        # ensure that train_y is (N x 1)
        train_y = train_y.reshape((train_y.shape[0], 1))
        self.train_x = train_x
        self._raw_train_x = train_x.copy()
        self._raw_train_y = train_y.copy()
        self.train_y = train_y
        self.features_name = features_name

        self.do_standardization = do_standardization
        self._x_std_ = None
        self._x_mean_ = None
Example 19
    def __init__(self, name: str, values: np.ndarray, gradients: np.ndarray):
        values = np.asarray(values)
        gradients = np.asarray(gradients)
        if values.shape != gradients.shape:
            raise ValueError("Parameter values and gradients must be the same shape")
        self.shape = values.shape
        self.name = name

        self.parameters = [Parameter(self.name, idx, value, gradient, Delta())
                           for idx, value, gradient in
                           zip(range(values.size), values.flatten(), gradients.flatten())]

        self.parameter_map = {p.name: p for p in self.parameters}
Example 20
    def add_dates(self, dates: np.ndarray) -> None:
        dates.sort()
        for d in dates:
            current_date = d.date()
            if self.start_date > current_date:
                self.start_date = current_date
            else:
                break

        for d in dates[::-1]:
            current_date = d.date()
            if self.end_date < current_date:
                self.end_date = current_date
            else:
                break
Example 21
def check_for_winner(board: np.ndarray) -> int:
    """Who's won? If 3 then X has won if -3, O"""

    # Do a sum along 3 vertical + 3 horizontal + 2 diagonals
    sums_list = []
    vertical_sum = board.sum(axis=0)
    sums_list.append(vertical_sum[np.argmax(abs(vertical_sum))])

    horiz_sum = board.sum(axis=1)
    sums_list.append(horiz_sum[np.argmax(abs(horiz_sum))])

    sums_list.append(board[0, 0] + board[1, 1] + board[2, 2])
    sums_list.append(board[0, 2] + board[1, 1] + board[2, 0])

    return sums_list[np.argmax(np.abs(sums_list))]
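Usage sketch: X (encoded as 1) wins on the top row, so the function returns 3:

import numpy as np

board = np.array([[1, 1, 1],
                  [-1, -1, 0],
                  [0, 0, 0]])
print(check_for_winner(board))  # -> 3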
Example 22
def sphere2cart(pts: np.ndarray, degrees: bool=False) -> np.ndarray:
    """Convert spherical coordinates to Cartesian coordinates.

    :param ndarray pts: array of spherical coordinates
    :param bool degrees: if True the input angles are given in degrees \
        (default: False)
    :returns: [x, y, z]
    :rtype: ndarray

    >>> sphere2cart(np.array([[1, 0, 90], [1, 90, 90]]), degrees=True)
    array([[  1.00000000e+00,   0.00000000e+00,   6.12323400e-17],
           [  6.12323400e-17,   1.00000000e+00,   6.12323400e-17]])
    """
    element_dimension(pts, 3)

    if degrees:
        pts = pts.astype(float)
        pts[:, 1:3] = np.radians(pts[:, 1:3])

    r = pts[:, 0]
    theta = pts[:, 1]
    phi = pts[:, 2]
    x = r * np.sin(phi) * np.cos(theta)
    y = r * np.sin(phi) * np.sin(theta)
    z = r * np.cos(phi)

    return np.c_[x, y, z]
Example 23
def inverse_additive_log_ratio(Y: np.ndarray, ind=-1):
    """
    Inverse additive log ratio transform.
    """
    assert Y.ndim in [1, 2]
    
    X = Y.copy()
    dimensions = X.shape[X.ndim-1]
    idx = np.arange(0, dimensions+1)
    
    if ind != -1:
        idx = np.array(list(idx[idx < ind]) + 
                       [-1] + 
                       list(idx[idx >= ind+1]-1))
    
    # Add a zero-column and reorder columns
    if Y.ndim == 2:
        X = np.concatenate((X, np.zeros((X.shape[0], 1))), axis=1)
        X = X[:, idx]
    else:
        X = np.append(X, np.array([0]))
        X = X[idx]
    
    # Inverse log and closure operations
    X = np.exp(X)
    X = close(X)
    return X
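The close function used above is assumed to be the compositional closure operator (rescaling each composition to unit sum); a minimal sketch under that assumption:

import numpy as np

# Minimal sketch of the assumed closure operator: rescale so components sum to 1.
def close(X: np.ndarray) -> np.ndarray:
    if X.ndim == 2:
        return X / X.sum(axis=1, keepdims=True)
    return X / X.sum()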
Example 24
    def package_value(
            self,
            value: np.ndarray,  # data
            name: str,  # NDVar name
            info: dict = None,  # NDVar info
            meas: str = None,  # for NDVar info
    ):
        if not self.yshape:
            return value[0]

        # shape
        has_vector = value.shape[0] > self.yshape[0]
        if self.vector_dim and not has_vector:
            dims = self.ydims[:-1]
            shape = self.yshape[:-1]
        else:
            dims = self.ydims
            shape = self.yshape
        if not dims:
            return value[0]
        elif len(shape) > 1:
            value = value.reshape(shape)

        # info
        if meas:
            info = _info.for_stat_map(meas, old=info)
        elif info is None:
            info = self.y_info

        return NDVar(value, dims, info, name)
Example 25
	def __init__(self, data: np.ndarray, world_ref_center: np.ndarray, axis: np.ndarray):
		self.data_to_world_ref_center = data - world_ref_center
		self.mod_pi = [np.linalg.norm(pi) for pi in axis]
		self.projection_over_pi = []
		self.min = []
		self.max = []
		self.delta_lambda = []
		self.volume = 1.0
		self.box_ref_center = []
		self.dimension = data.shape[1]

		for i in range(0,self.dimension):
			projection_over_pi = np.array(np.dot(self.data_to_world_ref_center, axis[i,:]) / self.mod_pi[i])
			vmin = projection_over_pi.min(0)
			vmax = projection_over_pi.max(0)
			self.projection_over_pi.append(projection_over_pi)
			self.min.append(vmin)
			self.max.append(vmax)
			delta_lambda = (np.array(vmax) - np.array(vmin))/2.0
			self.delta_lambda.append(delta_lambda)
			self.volume *= delta_lambda

		#self.center = center + 0.5 * np.dot(np.array(self.min) + np.array(self.max),axis)
		# min and max are relative to the center in box coordinates; to get world
		# coordinates we transform according to the world center and the rotation
		# matrix given by the eigenvectors.
		self.box_ref_center = world_ref_center + 0.5 * np.dot(axis.transpose(),np.array(self.min) + np.array(self.max))
Example 26
    def make_move(self, board: np.ndarray, move: int) -> np.ndarray:
        moving_player = self.get_active_player(board)
        new_board: np.ndarray = board.copy()
        available_idx, = np.where(new_board[:, move] == 0)

        new_board[available_idx[-1]][move] = moving_player
        return new_board
Example 27
def print_board(board: np.ndarray):
    """Print out the board representation in sexy ASCII...
    
    print_board(np.array([[1,0,-1], [0,0,0], [1,-1,0]]))
    
     X |   | O
    -----------
       |   |
    -----------
     X | O |
    """

    li_board = board.tolist()
    print('')
    for i, row in enumerate(li_board):
        t = str(row)
        t = t.replace('[', ' ')
        t = t.replace(']', '')
        t = t.replace(',', ' |')
        t = t.replace('-1', 'O')
        t = t.replace('1', 'X')
        t = t.replace('0', ' ')

        print(t)
        if i < 2:
            print('-----------')
Example 28
def inv_zform(data: np.ndarray,
              out: Optional[np.ndarray] = None,
              clone: bool = True,
              sigma: float = 1,
              mu: float = 1) -> np.ndarray:
    if clone or out is None:
        out = data.copy()
    else:
        out[...] = data
    # assumed inverse of the z-score transform: x = z * sigma + mu
    out *= sigma
    out += mu
    return out
Example 29
def pol2cart(pts: np.ndarray, degrees: bool=False) -> np.ndarray:
    """Convert polar or cylindrical coordinates to Cartesian coordinates.

    :param ndarray pts: array of polar points (rho, theta) or cylindrical \
        points (rho, theta, phi)
    :param bool degrees: if True the input angles are given in degrees \
        (default: False)
    :returns: [x, y, (*z*)]
    :rtype: ndarray

    >>> pol2cart(np.array([[2**0.5, 45], [1, 90]]), degrees=True)
    array([[  1.00000000e+00,   1.00000000e+00],
           [  6.12323400e-17,   1.00000000e+00]])

    >>> pol2cart(np.array([[2**0.5, 45, 1], [1, 90, 2]]), degrees=True)
    array([[  1.00000000e+00,   1.00000000e+00,   1.00000000e+00],
           [  6.12323400e-17,   1.00000000e+00,   2.00000000e+00]])
    """
    dim = element_dimension(pts, [2, 3])

    if degrees:
        pts = pts.astype(float)
        pts[:, 1] = np.radians(pts[:, 1])

    x = pts[:, 0] * np.cos(pts[:, 1])
    y = pts[:, 0] * np.sin(pts[:, 1])

    if dim == 2:
        return np.c_[x, y]
    else:
        return np.c_[x, y, pts[:, 2]]
Example 30
    def find_stones(self, img: np.ndarray, rs=0, re=gsize, cs=0, ce=gsize, **kwargs):
        """ The stones detection main algorithm, which is based on k-means pixel clustering.

        Note: the three colors (E, B, W) must be present in the image for this statistical method to work.

        Args:
            img: ndarray
                The Goban image.
            rs: int - inclusive
            re: int - exclusive
                Row start and end indexes. Can be used to restrain check to a subregion.
            cs: int - inclusive
            ce: int - exclusive
                Column start and end indexes. Can be used to restrain check to a subregion.
            kwargs:
                Allowing for keyword args enables multiple find methods to be called interchangeably. See SfMeta.

        Returns stones: ndarray
            A matrix containing the detected stones in the desired subregion of the image,
            or None if the result could not be trusted or something failed.
        """
        if img.dtype != np.float32:
            img = img.astype(np.float32)
        ratios, centers = self.cluster_colors(img, rs=rs, re=re, cs=cs, ce=ce)
        stones = self.interpret_ratios(ratios, centers, r_start=rs, r_end=re, c_start=cs, c_end=ce)
        if not self.check_density(stones):
            return None  # don't trust this result
        return stones
Example 31
 def _build_contributions(data: np.ndarray, index: int,
                          axis: int) -> np.ndarray:
     return data.take(index, axis=axis)
Example 32
 def elu(values: np.ndarray, alpha: float):
     values = values.astype(float)
     negative = values < 0
     values[negative] = alpha * (np.exp(values[negative]) - 1)
     return values
Example 33
 def thresholded_relu(values: np.ndarray, alpha: float):
     values = values.astype(float)
     return values * (values > alpha)
Example 34
 def predict(self, x: np.ndarray):
     return x.reshape(x.shape[0], -1)
Example 35
def quantil_mask(
        signal: np.ndarray,
        quantil=[0.1, -0.9],
        *,
        sensor_axis=None,
        axis=(-2),
        weight: float=0.999,
) -> np.ndarray:
    """

    Args:
        signal: input signal
        quantil: positive for a speech mask, negative for a noise mask
        sensor_axis: optional axis to sum over before thresholding
        axis: Suggestion: time axis. Alternative: time and frequency axes
        weight: softening weight applied to the binary mask

    Returns:
        Mask of shape [*quantil.shape, *signal.shape]

    """
    signal = np.abs(signal)

    if isinstance(quantil, (tuple, list)):
        return np.array([quantil_mask(signal=signal, sensor_axis=sensor_axis, axis=axis, quantil=q, weight=weight) for q in quantil])

    if sensor_axis is not None:
        signal = signal.sum(axis=sensor_axis, keepdims=True)

    if not isinstance(axis, (tuple, list)):
        axis = (axis,)

    # Convert signal to 2D with [independent, sample axis]
    tmp_axis = tuple([-i - 1 for i in range(len(axis))])
    signal = np.moveaxis(signal, axis, tmp_axis)
    shape = signal.shape
    working_shape = tuple(
        [np.prod(shape[:-len(tmp_axis)]), np.prod(shape[-len(tmp_axis):])])
    signal = np.reshape(signal, working_shape)

    if quantil >= 0:
        threshold = np.percentile(signal, q=(1 - quantil)*100, axis=-1)
    else:
        threshold = np.percentile(signal, q=abs(quantil)*100, axis=-1)

    mask = np.zeros_like(signal)
    for i in range(mask.shape[0]):
        if quantil >= 0:
            mask[i, :] = signal[i, :] > threshold[i]
        else:
            mask[i, :] = signal[i, :] < threshold[i]

    # Drop this line?
    mask = 0.5 + weight * (mask - 0.5)

    # Restore original shape
    mask = np.moveaxis(mask.reshape(shape), tmp_axis, axis)

    if sensor_axis is not None:
        mask = np.squeeze(mask, axis=sensor_axis)
    return mask
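Usage sketch on a toy [frequency, time] spectrogram, taking the last axis as the time axis (illustrative shapes):

import numpy as np

spec = np.abs(np.random.randn(4, 100))
speech_mask, noise_mask = quantil_mask(spec, quantil=[0.1, -0.9], axis=-1)
print(speech_mask.shape, noise_mask.shape)  # -> (4, 100) (4, 100)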
Example 36
    def train(self,
              images: np.ndarray,
              segmented_images: np.ndarray,
              save_name,
              load_dir=None):
        """
        Trains the network with given images
        :param images: original images
        :param segmented_images: segmented target images
        :param save_name: name under which model checkpoints are saved
        :param load_dir: optional checkpoint directory to resume training from
        :return: None
        """

        self.data_dimensions = segmented_images.shape
        self.dataset = self.createData(images, segmented_images)
        model_name = save_name
        epoch = 0

        if load_dir is not None:
            self.logger.info(f"Loading from {load_dir}")
            self._load(load_dir)
            model_name, epoch = self._parse_dir(load_dir)

        if self.model is None:
            ## load_dir might not have been specified, or load_dir is incorrect
            self.logger.info(
                f"New model instance created on device {config.train_device}")
            if self.network_type == 0:
                self.model = UNet_final(args.compressed_size).to(
                    device=config.train_device, dtype=None, non_blocking=False)
            elif self.network_type == 1:
                self.model = UNet_compressed(args.compressed_size).to(
                    device=config.train_device, dtype=None, non_blocking=False)

        distance = nn.MSELoss()
        optimizer = torch.optim.Adam(self.model.parameters(),
                                     lr=args.learning_rate,
                                     weight_decay=args.l2_reg)
        st = time.perf_counter()

        self.logger.info("Training the network....")
        for ep in range(epoch, args.total_epochs):
            for i, images in enumerate(self.dataset):
                images = images.to(config.train_device)
                images_input = images[:, :3, :, :]
                images_output = images[:, 3:, :, :]
                compressed, final = self.model(images_input)

                optimizer.zero_grad()
                loss = distance(final, images_output)
                loss.backward()
                optimizer.step()

            self.logger.info(
                'epoch [{}/{}], loss:{:.4f}, time elapsed:{:.4f} (sec)'.format(
                    ep, args.total_epochs, loss.item(),
                    time.perf_counter() - st))
            st = time.perf_counter()

            if ep % 30 == 0:
                self._save(save_name, ep)

        self._save(save_name, args.total_epochs)
        self.logger.info(
            f"Finished training the network and save as {save_name+'-epoch'+str(args.total_epochs)+'.pth'}"
        )
        return None
Example 37
    def _train(self, X: np.ndarray, y: np.ndarray):
        """Trains the random forest on X and y.

        Parameters
        ----------
        X : np.ndarray [n_samples, n_features (config + instance features)]
            Input data points.
        y : np.ndarray [n_samples, ]
            The corresponding target values.

        Returns
        -------
        self
        """

        self.X = X
        self.y = y.flatten()

        from smac.epm.gp_kernels import ConstantKernel, Matern, WhiteKernel, HammingKernel
        from smac.epm.gp_base_prior import HorseshoePrior, LognormalPrior

        self.rf = sklearn.ensemble.RandomForestRegressor(
            max_features=1,
            bootstrap=True,
            max_depth=2,
            min_samples_leaf=5,
            n_estimators=N_EST,
        )
        self.rf.fit(X, np.log(y - np.min(y) + 1e-7).ravel())
        indicators = np.array(self.rf.apply(X))
        all_datasets = []
        all_targets = []
        all_mappings = []
        for est in range(N_EST):
            unique = np.unique(indicators[:, est])
            mapping = {j: i for i, j in enumerate(unique)}
            datasets = [[] for _ in unique]
            targets = [[] for _ in indicators]
            for indicator, x, y_ in zip(indicators[:, est], X, y):
                index = mapping[indicator]
                datasets[index].append(x)
                targets[index].append(y_)
            all_mappings.append(mapping)
            all_datasets.append(datasets)
            all_targets.append(targets)

        # print('Before')
        # for est in range(N_EST):
        #     for dataset in all_datasets[est]:
        #         print(len(dataset))

        for est in range(N_EST):
            n_nodes = self.rf.estimators_[est].tree_.node_count
            children_left = self.rf.estimators_[est].tree_.children_left
            children_right = self.rf.estimators_[est].tree_.children_right
            feature = self.rf.estimators_[est].tree_.feature
            threshold = self.rf.estimators_[est].tree_.threshold

            # The tree structure can be traversed to compute various properties such
            # as the depth of each node and whether or not it is a leaf.
            node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
            is_leaves = np.zeros(shape=n_nodes, dtype=bool)
            stack = [(0, -1)]  # seed is the root node id and its parent depth
            while len(stack) > 0:
                node_id, parent_depth = stack.pop()
                node_depth[node_id] = parent_depth + 1

                # If we have a test node
                if (children_left[node_id] != children_right[node_id]):
                    stack.append((children_left[node_id], parent_depth + 1))
                    stack.append((children_right[node_id], parent_depth + 1))
                else:
                    is_leaves[node_id] = True

            rules = {}
            import copy

            def extend(rule, idx):
                if is_leaves[idx]:
                    rules[idx] = rule
                else:
                    rule_left = copy.deepcopy(rule)
                    rule_left.append((threshold[idx], '<=', feature[idx]))
                    extend(rule_left, children_left[idx])
                    rule_right = copy.deepcopy(rule)
                    rule_right.append((threshold[idx], '>', feature[idx]))
                    extend(rule_right, children_right[idx])

            extend([], 0)
            #print(rules)

            for key, rule in rules.items():
                lower = -np.ones((X.shape[1], )) * np.inf
                upper = np.ones((X.shape[1], )) * np.inf
                for element in rule:
                    if element[1] == '<=':
                        if element[0] < upper[element[2]]:
                            upper[element[2]] = element[0]
                    else:
                        if element[0] > lower[element[2]]:
                            lower[element[2]] = element[0]

                for feature_idx in range(X.shape[1]):
                    closest_lower = -np.inf
                    closest_lower_idx = None
                    closest_upper = np.inf
                    closest_upper_idx = None
                    # remember the index of the closest sample
                    for sample_idx, x in enumerate(X):
                        if x[feature_idx] > lower[feature_idx] and x[
                                feature_idx] < upper[feature_idx]:
                            continue
                        if x[feature_idx] <= lower[feature_idx]:
                            if x[feature_idx] > closest_lower:
                                closest_lower = x[feature_idx]
                                closest_lower_idx = sample_idx
                        if x[feature_idx] >= upper[feature_idx]:
                            if x[feature_idx] < closest_upper:
                                closest_upper = x[feature_idx]
                                closest_upper_idx = sample_idx

                    if closest_upper_idx is not None:
                        all_datasets[est][all_mappings[est][key]].append(
                            X[closest_upper_idx])
                        all_targets[est][all_mappings[est][key]].append(
                            y[closest_upper_idx])
                    if closest_lower_idx is not None:
                        all_datasets[est][all_mappings[est][key]].append(
                            X[closest_lower_idx])
                        all_targets[est][all_mappings[est][key]].append(
                            y[closest_lower_idx])

        # print('After')
        # for est in range(N_EST):
        #     for dataset in all_datasets[est]:
        #         print(len(dataset))

        self.all_mappings = all_mappings
        self.models = []
        for est in range(N_EST):
            models = []
            for dataset, targets_ in zip(all_datasets[est], all_targets[est]):

                cov_amp = ConstantKernel(
                    2.0,
                    constant_value_bounds=(np.exp(-10), np.exp(2)),
                    prior=LognormalPrior(mean=0.0, sigma=1.0, rng=self.rng),
                )

                cont_dims = np.nonzero(self.types == 0)[0]
                cat_dims = np.nonzero(self.types != 0)[0]

                if len(cont_dims) > 0:
                    exp_kernel = Matern(
                        np.ones([len(cont_dims)]),
                        [(np.exp(-10), np.exp(2))
                         for _ in range(len(cont_dims))],
                        nu=2.5,
                        operate_on=cont_dims,
                    )

                if len(cat_dims) > 0:
                    ham_kernel = HammingKernel(
                        np.ones([len(cat_dims)]),
                        [(np.exp(-10), np.exp(2))
                         for _ in range(len(cat_dims))],
                        operate_on=cat_dims,
                    )

                noise_kernel = WhiteKernel(
                    noise_level=1e-8,
                    noise_level_bounds=(np.exp(-25), np.exp(2)),
                    prior=HorseshoePrior(scale=0.1, rng=self.rng),
                )

                if len(cont_dims) > 0 and len(cat_dims) > 0:
                    # both
                    kernel = cov_amp * (exp_kernel * ham_kernel) + noise_kernel
                elif len(cont_dims) > 0 and len(cat_dims) == 0:
                    # only cont
                    kernel = cov_amp * exp_kernel + noise_kernel
                elif len(cont_dims) == 0 and len(cat_dims) > 0:
                    # only cat
                    kernel = cov_amp * ham_kernel + noise_kernel
                else:
                    raise ValueError()

                gp = GaussianProcess(
                    configspace=self.configspace,
                    types=self.types,
                    bounds=self.bounds,
                    kernel=kernel,
                    normalize_y=True,
                    seed=self.rng.randint(low=0, high=10000),
                )
                gp.train(np.array(dataset), np.array(targets_))
                models.append(gp)
            self.models.append(models)
        return self
Example 38
    def loc_to_iloc(cls,
                    *,
                    label_to_pos: tp.Dict,
                    labels: np.ndarray,
                    positions: np.ndarray,
                    key: GetItemKeyType,
                    offset: tp.Optional[int] = None) -> GetItemKeyType:
        '''
        Note: all SF objects (Series, Index) need to be converted to basic types before being passed as `key` to this function.

        Args:
            offset: in the context of an IndexHierarchy, the iloc positions returned from this function need to be shifted.
        Returns:
            An integer mapped slice, or GetItemKey type that is based on integers, compatible with TypeBlocks
        '''
        offset_apply = offset is not None

        # ILoc is handled prior to this call, in the Index.loc_to_iloc method

        if isinstance(key, slice):
            if offset_apply and key == NULL_SLICE:
                # when offset is defined (even if it is zero), null slice is not sufficiently specific; need to convert to an explicit slice relative to the offset
                return slice(offset, len(positions) + offset)
            try:
                return slice(
                    *cls.map_slice_args(label_to_pos.get, key, labels, offset))
            except LocEmpty:
                return EMPTY_SLICE

        if isinstance(key, np.datetime64):
            # convert this to the target representation, do a Boolean selection
            if labels.dtype != key.dtype:
                key = labels.astype(key.dtype) == key
            # if not different type, keep it the same so as to do a direct, single element selection

        # handles only lists and arrays; break out comparisons to avoid multiple
        # if isinstance(key, KEY_ITERABLE_TYPES):
        is_array = isinstance(key, np.ndarray)
        is_list = isinstance(key, list)

        # can be an iterable of labels (keys) or an iterable of Booleans
        if is_array or is_list:
            if is_array and key.dtype.kind == DTYPE_DATETIME_KIND:
                if labels.dtype != key.dtype:
                    labels_ref = labels.astype(key.dtype)
                    # let Boolean key hit next branch
                    key = reduce(operator_mod.or_,
                                 (labels_ref == k for k in key))
                    # NOTE: may want to raise instead of support this
                    # raise NotImplementedError(f'selecting {labels.dtype} with {key.dtype} is not presently supported')

            if is_array and key.dtype == bool:
                if offset_apply:
                    return positions[key] + offset
                return positions[key]

            # map labels to integer positions
            # NOTE: we may miss the opportunity to get a reference from values when we have contiguous keys
            if offset_apply:
                return [label_to_pos[x] + offset for x in key]
            return [label_to_pos[x] for x in key]

        # if a single element (an integer, string, or date, we just get the integer out of the map
        if offset_apply:
            return label_to_pos[key] + offset
        return label_to_pos[key]
Example 39
def relative_ce(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    """Calculate relative cross-entropy."""
    naive_pred = np.ones_like(y_true) * y_true.mean()
    ce_naive_pred = log_loss(y_true=y_true, y_pred=naive_pred)
    ce_y_pred = log_loss(y_true=y_true, y_pred=y_pred)
    return 1.0 - (ce_y_pred / ce_naive_pred)
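Usage sketch, assuming log_loss is sklearn.metrics.log_loss: a skilful prediction scores well above 0, while predicting the base rate everywhere scores 0:

import numpy as np
from sklearn.metrics import log_loss  # assumed source of log_loss

y_true = np.array([0, 0, 1, 1])
y_pred = np.array([0.1, 0.2, 0.8, 0.9])
print(relative_ce(y_true, y_pred))  # roughly 0.76 for this good prediction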
Example 40
def is_diagonal(mat: np.ndarray) -> bool:
    r"""
    Determine if a matrix is diagonal [WIKDIA]_.

    A matrix is diagonal if it is square and all of its off-diagonal
    elements are zero.

    The following is an example of a 3-by-3 diagonal matrix:

    .. math::
        \begin{equation}
            \begin{pmatrix}
                1 & 0 & 0 \\
                0 & 2 & 0 \\
                0 & 0 & 3
            \end{pmatrix}
        \end{equation}

    This quick implementation is given by Daniel F. from StackOverflow in
    [SODIA]_.

    Examples
    ==========

    Consider the following diagonal matrix

    .. math::
        A = \begin{pmatrix}
                                1 & 0 \\
                                0 & 1
                           \end{pmatrix}.

    Our function indicates that this is indeed a diagonal matrix:

    >>> from toqito.linear_algebra.properties.is_diagonal import is_diagonal
    >>> import numpy as np
    >>> A = np.array([[1, 0], [0, 1]])
    >>> is_diagonal(A)
    True

    Alternatively, the following example matrix

    .. math::
        B = \begin{pmatrix}
                                1 & 2 \\
                                3 & 4
                             \end{pmatrix}

    is not diagonal, as shown using `toqito`

    >>> from toqito.linear_algebra.properties.is_diagonal import is_diagonal
    >>> import numpy as np
    >>> B = np.array([[1, 2], [3, 4]])
    >>> is_diagonal(B)
    False

    References
    ==========
    .. [WIKDIA] Wikipedia: Diagonal matrix
        https://en.wikipedia.org/wiki/Diagonal_matrix

    .. [SODIA] StackOverflow post
        https://stackoverflow.com/questions/43884189/

    :param mat: The matrix to check.
    :return: Returns True if the matrix is diagonal and False otherwise.
    """
    if not is_square(mat):
        return False
    i, j = mat.shape
    # Dropping the last element and reshaping to (i - 1, j + 1) shifts each
    # row so the diagonal lands in column 0 and every off-diagonal entry
    # lands in columns 1..j.
    test = mat.reshape(-1)[:-1].reshape(i - 1, j + 1)
    return not np.any(test[:, 1:])
Example 41
def _md5(a: np.ndarray) -> str:
    """Get md5 hash of a numpy array."""
    return md5(a.tobytes()).hexdigest()
Example 42
 def train(self, vecs: np.ndarray, *args, **kwargs):
     vecs = vecs.reshape([-1, vecs.shape[-1]])
     assert len(vecs) > self.num_clusters, \
         'number of data should be larger than number of clusters'
     self.kmeans_train(vecs)
Example 43
    def activate(x: np.ndarray, copy: bool = False) -> np.ndarray:
        y = x.copy() if copy else x

        y[x <= 0.] *= Leaky.LEAKY_COEF
        return y
Example 44
def soft_convert_objects(
    values: np.ndarray,
    datetime: bool = True,
    numeric: bool = True,
    timedelta: bool = True,
    coerce: bool = False,
    copy: bool = True,
):
    """ if we have an object dtype, try to coerce dates and/or numbers """

    validate_bool_kwarg(datetime, "datetime")
    validate_bool_kwarg(numeric, "numeric")
    validate_bool_kwarg(timedelta, "timedelta")
    validate_bool_kwarg(coerce, "coerce")
    validate_bool_kwarg(copy, "copy")

    conversion_count = sum((datetime, numeric, timedelta))
    if conversion_count == 0:
        raise ValueError(
            "At least one of datetime, numeric or timedelta must be True.")
    elif conversion_count > 1 and coerce:
        raise ValueError("Only one of 'datetime', 'numeric' or "
                         "'timedelta' can be True when when coerce=True.")

    if not is_object_dtype(values.dtype):
        # If not object, do not attempt conversion
        values = values.copy() if copy else values
        return values

    # If 1 flag is coerce, ensure 2 others are False
    if coerce:
        # Immediate return if coerce
        if datetime:
            from pandas import to_datetime

            return to_datetime(values, errors="coerce").to_numpy()
        elif timedelta:
            from pandas import to_timedelta

            return to_timedelta(values, errors="coerce").to_numpy()
        elif numeric:
            from pandas import to_numeric

            return to_numeric(values, errors="coerce")

    # Soft conversions
    if datetime:
        # GH 20380, when datetime is beyond year 2262, hence outside
        # bound of nanosecond-resolution 64-bit integers.
        try:
            values = lib.maybe_convert_objects(values, convert_datetime=True)
        except OutOfBoundsDatetime:
            pass

    if timedelta and is_object_dtype(values.dtype):
        # Object check to ensure only run if previous did not convert
        values = lib.maybe_convert_objects(values, convert_timedelta=True)

    if numeric and is_object_dtype(values.dtype):
        try:
            converted = lib.maybe_convert_numeric(values,
                                                  set(),
                                                  coerce_numeric=True)
        except (ValueError, TypeError):
            pass
        else:
            # If all NaNs, then do not alter
            values = converted if not isna(converted).all() else values
            values = values.copy() if copy else values

    return values
Example 45
def int16_to_float(audio: np.ndarray) -> np.ndarray:
    assert np.issubdtype(audio.dtype, np.integer)
    return audio.astype(np.float32) / (2**15 - 1)
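Usage sketch: full-scale int16 samples map to roughly ±1.0:

import numpy as np

samples = np.array([-32767, 0, 32767], dtype=np.int16)
print(int16_to_float(samples))  # -> [-1.  0.  1.]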
Example 46
def addVerticesAndTriangles(voxel_model_array: np.ndarray,
                            verts_indices: np.ndarray, model_offsets: Tuple,
                            x: int, y: int, z: int, vi: int):
    """
    Find the applicable mesh vertices and triangles for a target voxel.

    Args:
        voxel_model_array: VoxelModel.voxels
        verts_indices: verts indices array
        model_offsets: VoxelModel.coords
        x: Target voxel X location
        y: Target voxel Y location
        z: Target voxel Z location
        vi: Current vertex index

    Returns:
        New verts, Updated verts indices array, New tris, Updated current vert index
    """
    adjacent = [
        [check_adjacent(voxel_model_array, x, y, z, 1, 0, 0),
         check_adjacent(voxel_model_array, x, y, z, -1, 0, 0)],
        [check_adjacent(voxel_model_array, x, y, z, 0, 1, 0),
         check_adjacent(voxel_model_array, x, y, z, 0, -1, 0)],
        [check_adjacent(voxel_model_array, x, y, z, 0, 0, 1),
         check_adjacent(voxel_model_array, x, y, z, 0, 0, -1)],
    ]

    cube_verts_indices = np.array([0, 0, 0, 0, 0, 0, 0, 0])
    verts = []
    tris = []

    if not adjacent[0][0] or not adjacent[1][0] or not adjacent[2][0]:
        vert_pos = (x + 1, y + 1, z + 1)
        if verts_indices[vert_pos] < 1:
            verts_indices[vert_pos] = vi
            verts.append([
                vert_pos[0] + model_offsets[0], vert_pos[1] + model_offsets[1],
                vert_pos[2] + model_offsets[2]
            ])
            vi = vi + 1
        cube_verts_indices[0] = verts_indices[vert_pos]

    if not adjacent[0][0] or not adjacent[1][1] or not adjacent[2][0]:
        vert_pos = (x + 1, y, z + 1)
        if verts_indices[vert_pos] < 1:
            verts_indices[vert_pos] = vi
            verts.append([
                vert_pos[0] + model_offsets[0], vert_pos[1] + model_offsets[1],
                vert_pos[2] + model_offsets[2]
            ])
            vi = vi + 1
        cube_verts_indices[1] = verts_indices[vert_pos]

    if not adjacent[0][1] or not adjacent[1][0] or not adjacent[2][0]:
        vert_pos = (x, y + 1, z + 1)
        if verts_indices[vert_pos] < 1:
            verts_indices[vert_pos] = vi
            verts.append([
                vert_pos[0] + model_offsets[0], vert_pos[1] + model_offsets[1],
                vert_pos[2] + model_offsets[2]
            ])
            vi = vi + 1
        cube_verts_indices[2] = verts_indices[vert_pos]

    if not adjacent[0][1] or not adjacent[1][1] or not adjacent[2][0]:
        vert_pos = (x, y, z + 1)
        if verts_indices[vert_pos] < 1:
            verts_indices[vert_pos] = vi
            verts.append([
                vert_pos[0] + model_offsets[0], vert_pos[1] + model_offsets[1],
                vert_pos[2] + model_offsets[2]
            ])
            vi = vi + 1
        cube_verts_indices[3] = verts_indices[vert_pos]

    if not adjacent[0][0] or not adjacent[1][0] or not adjacent[2][1]:
        vert_pos = (x + 1, y + 1, z)
        if verts_indices[vert_pos] < 1:
            verts_indices[vert_pos] = vi
            verts.append([
                vert_pos[0] + model_offsets[0], vert_pos[1] + model_offsets[1],
                vert_pos[2] + model_offsets[2]
            ])
            vi = vi + 1
        cube_verts_indices[4] = verts_indices[vert_pos]

    if not adjacent[0][0] or not adjacent[1][1] or not adjacent[2][1]:
        vert_pos = (x + 1, y, z)
        if verts_indices[vert_pos] < 1:
            verts_indices[vert_pos] = vi
            verts.append([
                vert_pos[0] + model_offsets[0], vert_pos[1] + model_offsets[1],
                vert_pos[2] + model_offsets[2]
            ])
            vi = vi + 1
        cube_verts_indices[5] = verts_indices[vert_pos]

    if not adjacent[0][1] or not adjacent[1][0] or not adjacent[2][1]:
        vert_pos = (x, y + 1, z)
        if verts_indices[vert_pos] < 1:
            verts_indices[vert_pos] = vi
            verts.append([
                vert_pos[0] + model_offsets[0], vert_pos[1] + model_offsets[1],
                vert_pos[2] + model_offsets[2]
            ])
            vi = vi + 1
        cube_verts_indices[6] = verts_indices[vert_pos]

    if not adjacent[0][1] or not adjacent[1][1] or not adjacent[2][1]:
        vert_pos = (x, y, z)
        if verts_indices[vert_pos] < 1:
            verts_indices[vert_pos] = vi
            verts.append([
                vert_pos[0] + model_offsets[0], vert_pos[1] + model_offsets[1],
                vert_pos[2] + model_offsets[2]
            ])
            vi = vi + 1
        cube_verts_indices[7] = verts_indices[vert_pos]

    if not adjacent[0][0]:
        tris.append([
            cube_verts_indices[0] - 1, cube_verts_indices[1] - 1,
            cube_verts_indices[5] - 1
        ])
        tris.append([
            cube_verts_indices[5] - 1, cube_verts_indices[4] - 1,
            cube_verts_indices[0] - 1
        ])

    if not adjacent[1][0]:
        tris.append([
            cube_verts_indices[2] - 1, cube_verts_indices[0] - 1,
            cube_verts_indices[4] - 1
        ])
        tris.append([
            cube_verts_indices[4] - 1, cube_verts_indices[6] - 1,
            cube_verts_indices[2] - 1
        ])

    if not adjacent[2][0]:
        tris.append([
            cube_verts_indices[1] - 1, cube_verts_indices[0] - 1,
            cube_verts_indices[2] - 1
        ])
        tris.append([
            cube_verts_indices[2] - 1, cube_verts_indices[3] - 1,
            cube_verts_indices[1] - 1
        ])

    if not adjacent[0][1]:
        tris.append([
            cube_verts_indices[3] - 1, cube_verts_indices[2] - 1,
            cube_verts_indices[6] - 1
        ])
        tris.append([
            cube_verts_indices[6] - 1, cube_verts_indices[7] - 1,
            cube_verts_indices[3] - 1
        ])

    if not adjacent[1][1]:
        tris.append([
            cube_verts_indices[1] - 1, cube_verts_indices[3] - 1,
            cube_verts_indices[7] - 1
        ])
        tris.append([
            cube_verts_indices[7] - 1, cube_verts_indices[5] - 1,
            cube_verts_indices[1] - 1
        ])

    if not adjacent[2][1]:
        tris.append([
            cube_verts_indices[4] - 1, cube_verts_indices[5] - 1,
            cube_verts_indices[7] - 1
        ])
        tris.append([
            cube_verts_indices[7] - 1, cube_verts_indices[6] - 1,
            cube_verts_indices[4] - 1
        ])

    return verts, verts_indices.astype(np.uint32), tris, vi
Example 47
def mean_std(obs: np.ndarray, weights: np.ndarray):
    """Standard error of the weighted mean."""
    return std(obs, weights) / np.sqrt(weights.sum())
Example 48
def _interpolate_array(
        dataset: np.ndarray,
        feature_index: Union[int, str],  # yapf: disable
        treat_as_categorical: bool,
        steps_number: Union[int, None]) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generates a 3-D array with interpolated values for the selected feature.

    If the selected feature is numerical the interpolated values are a
    numerical array with evenly spaced numbers between the minimum and the
    maximum value in that column. Otherwise, when the feature is categorical,
    the interpolated values are all the unique elements of that column.

    To get the interpolation the original 2-D dataset is stacked on top of
    itself the number of times equal to the number of desired interpolation
    samples. Then, for every copy of that dataset the selected feature is fixed
    to consecutive values of the interpolated array (the same value for the
    whole copy of the dataset).

    Parameters
    ----------
    dataset : numpy.ndarray
        A dataset based on which interpolation will be done.
    feature_index : Union[integer, string]
        An index of the feature column in the input dataset for which the
        interpolation will be computed.
    treat_as_categorical : boolean
        Whether to treat the selected feature as categorical or numerical.
    steps_number : Union[integer, None]
        The number of evenly spaced samples between the minimum and the maximum
        value of the selected feature for which the model's prediction will be
        evaluated. This parameter applies only to numerical features; for
        categorical features it is ignored regardless of whether it is a
        number or ``None``.

    Returns
    -------
    interpolated_data : numpy.ndarray
        Numpy array of shape (n_samples, steps_number, n_features) -- where the
        (n_samples, n_features) is the dimension of the input ``dataset`` --
        holding the input ``dataset`` augmented with the interpolated values.
    interpolated_values : numpy.ndarray
        A 1-dimensional array of shape (steps_number, ) holding the
        interpolated values. If a numerical column is selected this will be a
        series of uniformly distributed ``steps_number`` values between the
        minimum and the maximum value of that column. For categorical (textual)
        columns it will hold all the unique values from that column.
    """
    assert isinstance(dataset, np.ndarray), 'Dataset -> numpy array.'
    assert isinstance(feature_index, (int, str)), 'Feature index -> str/ int.'
    assert isinstance(treat_as_categorical, bool), 'As categorical -> bool.'
    assert steps_number is None or isinstance(steps_number, int), \
        'Steps number -> None/ int.'

    is_structured = fuav.is_structured_array(dataset)

    if is_structured:
        column = dataset[feature_index]
    else:
        column = dataset[:, feature_index]

    if treat_as_categorical:
        interpolated_values = np.unique(column)
        interpolated_values.sort()
        # Ignoring steps number -- not needed for categorical.
        steps_number = interpolated_values.shape[0]
    else:
        assert isinstance(steps_number, int), 'Steps number must be an int.'
        interpolated_values = np.linspace(column.min(), column.max(),
                                          steps_number)

        # Give float type to this column if it is a structured array
        if (is_structured
                and dataset.dtype[feature_index] != interpolated_values.dtype):
            new_types = []
            for name in dataset.dtype.names:
                if name == feature_index:
                    dtype = fuat.generalise_dtype(interpolated_values.dtype,
                                                  dataset.dtype[name])
                    new_types.append((name, dtype))
                else:
                    new_types.append((name, dataset.dtype[name]))
            dataset = dataset.astype(new_types)
        elif not is_structured and dataset.dtype != interpolated_values.dtype:
            dtype = fuat.generalise_dtype(interpolated_values.dtype,
                                          dataset.dtype)
            dataset = dataset.astype(dtype)

    interpolated_data = np.repeat(dataset[:, np.newaxis], steps_number, axis=1)
    assert len(interpolated_values) == steps_number, 'Required for broadcast.'
    if is_structured:
        for idx in range(steps_number):
            # Broadcast the new value.
            interpolated_data[:, idx][feature_index] = interpolated_values[idx]
    else:
        # Broadcast the new vector.
        interpolated_data[:, :, feature_index] = interpolated_values

    return interpolated_data, interpolated_values
Example 49
def compute_mean_accuracy(cm: np.ndarray) -> float:
    """ compute the mean of true positive rate and true negative rate from a confusion matrix """
    cm = cm.astype('float') / get_denominator(cm.sum(axis=1)[:, np.newaxis])
    tp = cm[1, 1]
    tn = cm[0, 0]
    return np.mean([tp, tn])
Example 50
 def set_state(self, state: numpy.ndarray):
     """Set the state of the retro environment."""
     raw_state = state.tobytes()
     self.gym_env.em.set_state(raw_state)
     return state
Example 51
def var(obs: np.ndarray, weights: np.ndarray) -> float:
    return demeaned_sumsquares(obs, weights) / (weights.sum() - 1)
Example 52
 def predict(self, input_data: np.ndarray) -> torch.Tensor:
     processed_data = self.preprocess_observation(
         input_data.reshape(-1, self.num_states))
     self.__model.train(mode=False)
     return self.__model(processed_data)
Example 53
def mean(obs: np.ndarray, weights: np.ndarray) -> float:
    return np.divide(sum(obs, weights), weights.sum())
Example 54
 def backward(self, diff: np.ndarray, back_diff=True):
     return diff.reshape(*self.x_shape)
Example 55
def psnr(gt: np.ndarray, pred: np.ndarray) -> float:
    """Compute Peak Signal to Noise Ratio metric (PSNR)"""
    return peak_signal_noise_ratio(gt, pred, data_range=gt.max())
Example 56
 def forward(self, x: np.ndarray):
     self.x_shape = x.shape
     return x.reshape(x.shape[0], -1)
Example 57
    def _classify_eyemovements(self, gaze: np.ndarray,
                               timestamps: np.ndarray,
                               velocity: np.ndarray,
                               **kwargs) -> Tuple[np.ndarray, List[float]]:
        """
        Get list of eye movements as fixations or saccades.
        """
        # if [2, n_samples] -> change to [n_samples, 2]
        if gaze.shape[0] < gaze.shape[1]:
            gaze = gaze.T
        n, m = gaze.shape

        movements = np.zeros((n,), dtype=np.int32)  # all eye movements detected
        stats = []

        # detect saccades
        detected_saccades = self._detect_saccades(timestamps, velocity)
        cleaned_saccades = self._clean_short_saccades(detected_saccades, timestamps)
        for i in range(len(movements)):
            if i in list(chain.from_iterable(cleaned_saccades)):
                movements[i] = GazeState.saccade
            else:
                movements[i] = GazeState.unknown

        start = 0  # current window start position
        end = 0  # current window end position
        fix_marked = False  # fixation found flag
        window_size = self._window_size  # instantiate local variable (it can be changed!)

        # fixations and sp identification
        while (end < len(gaze) - 1) and (start < len(gaze) - 1):

            # Calculation window
            if (start == 0) or (start == end):  # first point
                g = gaze[start: start + window_size]
                end = start + window_size

            elif fix_marked:  # last time found a fix, then take full new window
                if (start + window_size) >= len(gaze):  # tail
                    g = gaze[start:]
                    window_size = len(gaze) - start - 1
                    end = len(gaze)
                else:
                    g = gaze[start: start + window_size]
                    end = start + window_size
                # reset fix flag
                fix_marked = False

            else:  # otherwise add one element from the left side of the array
                if movements[start] != GazeState.saccade:
                    g = np.append(g, gaze[start].reshape(1, 2), axis=0)
                    end += 1

            # Re-calc dispersion
            dispersion = self._count_dispersion(g)
            stats.append(dispersion)
            # fixation
            if dispersion < self._dispersion_threshold:
                while (dispersion < self._dispersion_threshold) and (end + 1 < len(gaze)):
                    end += 1
                    g = np.append(g, gaze[end].reshape(1, 2), axis=0)
                    dispersion = self._count_dispersion(g)
                fix_marked = True
                # mark as fixation
                for i in range(start, end, 1):
                    if (i < len(movements)) and (movements[i] != GazeState.saccade):
                        movements[i] = GazeState.fixation
                start = end
            # sp
            else:
                # mark as sp
                if movements[start] != GazeState.saccade:
                    movements[start] = GazeState.sp
                start += 1

        return movements, stats
Example 58
def normalize(arr: np.ndarray) -> np.ndarray:
    return (arr - arr.min()) / (arr.max() - arr.min())
Example 59
def plt_rgb(img: np.ndarray):
    img_np = img.copy().astype('uint8')
    plt.imshow(img_np)
Example 60
    def activate(x: np.ndarray, copy: bool = False) -> np.ndarray:
        # note: with copy=False the in-place zeroing below also modifies x,
        # which changes the value of the `.1 * x` term in the return
        y = x.copy() if copy else x

        y[x <= 0.] = 0
        return y + .1 * x