Code Example #1
File: domain_objects.py Project: DomenicD/domenic
    def __init__(self, name: str, values: np.ndarray, gradients: np.ndarray):
        values = np.asarray(values)
        gradients = np.asarray(gradients)
        if values.shape != gradients.shape:
            raise ValueError("Parameter values and gradients must be the same shape")
        self.shape = values.shape
        self.name = name

        self.parameters = [Parameter(self.name, idx, value, gradient, Delta())
                           for idx, value, gradient in
                           zip(range(values.size), values.flatten(), gradients.flatten())]

        self.parameter_map = {p.name: p for p in self.parameters}
Code Example #2
File: nphusl.py Project: TadLeonard/husl-numpy
def _f(y_nd: ndarray) -> ndarray:
    y_flat = y_nd.flatten()
    f_flat = np.zeros(y_flat.shape, dtype=float)
    gt = y_flat > constants.EPSILON
    f_flat[gt] = (y_flat[gt] / constants.REF_Y) ** (1.0 / 3.0) * 116 - 16
    f_flat[~gt] = (y_flat[~gt] / constants.REF_Y) * constants.KAPPA
    return f_flat.reshape(y_nd.shape)
Code Example #3
File: nphusl.py Project: TadLeonard/husl-numpy
def _f_inv(l_nd: ndarray) -> ndarray:
    l_flat = l_nd.flatten()
    # operate on the flattened array so the boolean masks line up with `out`
    large = l_flat > 8
    small = ~large
    out = np.zeros(l_flat.shape, dtype=float)
    out[large] = constants.REF_Y * (((l_flat[large] + 16) / 116) ** 3.0)
    out[small] = constants.REF_Y * l_flat[small] / constants.KAPPA
    return out.reshape(l_nd.shape)
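
The two helpers above are intended to be mutual inverses (the piecewise CIE Y <-> L* mapping). A minimal round-trip sketch, assuming constants.REF_Y, constants.EPSILON, and constants.KAPPA follow the usual CIE/HUSL definitions so that the `y > EPSILON` and `L > 8` branch cuts coincide:

# Round-trip sanity check (a sketch; assumes the usual CIE constants,
# e.g. REF_Y = 1.0, EPSILON ~ 0.0088565, KAPPA ~ 903.2963).
import numpy as np

y = np.linspace(0.0, 1.0, 11)
np.testing.assert_allclose(_f_inv(_f(y)), y, atol=1e-10)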
Code Example #4
File: math.py Project: broadinstitute/gatk
def logsumexp_double_complement(a: np.ndarray, rel_tol: float = 1e-3) -> float:
    """Calculates the following expression in a numerically stable fashion:

        log(1 - (1 - exp(a_0)) x (1 - exp(a_1)) x ...)

    where a_i are the entries of `a` and are assumed to be non-positive. The algorithm is as follows:

    We define:

        exp(x_n) = 1 - \prod_{i=0}^n (1 - exp(a_i)),

    Thus, we have x_0 = a_0 and the recursion relation:

        exp(x_{n+1}) = exp(x_n) + exp(b_{n+1}),

    where

        b_{n+1} = a_{n+1} + log(1 - exp(x_n)).

    We sort `a` in descending order and update `x` term by term. It is easy to show that x_{n} is monotonically
    increasing and that |x_{N} - x_{n}| < (N - n) |x_{n} - x_{n-1}|. We use the last inequality to bound the error
    for early stopping.

    Args:
        a: a float array
        rel_tol: relative error tolerance for early stopping of calculation

    Returns:
        a float scalar
    """
    try:
        assert isinstance(a, np.ndarray)
        a = np.asarray(a.copy(), dtype=float)
    except AssertionError:
        try:
            a = np.asarray(a, dtype=float)
        except ValueError:
            raise ValueError("The input argument must be castable to a float ndarray.")
    assert len(a) > 0
    assert 0. <= rel_tol < 1.0

    # enforce all entries of a to be negative or zero
    a[a > 0.] = 0.

    if len(a) == 1:
        return a.item()
    else:
        a = np.sort(a.flatten())[::-1]
        x = a[0]
        sz = len(a)
        for i, entry in enumerate(a[1:]):
            x_new = np.logaddexp(x, entry + logp_complement(x))
            if np.abs(x_new - x) * (sz - i - 1) < rel_tol * np.abs(x):
                return x_new
            else:
                x = x_new
        return x
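
A quick numerical sketch of what the routine computes, checked against the direct (unstable) formula for benign inputs. The `logp_complement` defined below is only an inline stand-in for the helper the function actually calls; it is assumed to compute log(1 - exp(x)) for x <= 0:

# Sketch: compare the stable routine against the direct formula.
import numpy as np

def logp_complement(x: float) -> float:
    # stand-in for the module's helper: log(1 - exp(x)), x <= 0
    return np.log1p(-np.exp(x))

a = np.log(np.array([0.2, 0.1, 0.05]))            # non-positive log-probabilities
direct = np.log(1.0 - np.prod(1.0 - np.exp(a)))   # log(1 - (1 - p0)(1 - p1)(1 - p2))
# logsumexp_double_complement(a) is expected to agree with `direct` to within rel_tol.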
Code Example #5
File: benchmark_result.py Project: JRetza/emukit
    def add_results(self, loop_name: str, i_repeat: int, metric_name: str, metric_values: np.ndarray) -> None:
        """
        Add results for a specific loop, metric and repeat combination

        :param loop_name: Name of loop
        :param i_repeat: Index of repeat
        :param metric_name: Name of metric
        :param metric_values: Metric values to add
        """
        self._results[loop_name][metric_name][i_repeat] = metric_values.flatten()
Code Example #6
File: _nphusl_expr.py Project: TadLeonard/husl-numpy
def _f(y_nd: ndarray) -> ndarray:
    y_flat = y_nd.flatten()
    f_flat = np.zeros(y_flat.shape, dtype=float)
    gt = y_flat > constants.EPSILON
    lt = ~gt
    y_flat_gt = y_flat[gt]
    y_flat_lt = y_flat[lt]
    ref_y = constants.REF_Y
    kappa = constants.KAPPA
    f_flat[gt] = ne.evaluate("(y_flat_gt / ref_y) ** (1. / 3.) * 116 - 16")
    f_flat[lt] = ne.evaluate("(y_flat_lt / ref_y) * kappa")
    return f_flat.reshape(y_nd.shape)
Code Example #7
File: evaluate.py Project: fmcc/mss_layout_analysis
def normalised_confusion_matrix(label:np.ndarray, image:np.ndarray):
    """ Create a normalised confusion matrix for a label and image in the same format. 
        """
    cm = confusion_matrix(label.flatten(), image.flatten())
    cm_normalised = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    return cm_normalised
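
A short usage sketch, assuming `confusion_matrix` here is `sklearn.metrics.confusion_matrix`:

# Usage sketch: each row of the result sums to 1 (per-class recall view).
import numpy as np
from sklearn.metrics import confusion_matrix

label = np.array([[0, 1], [1, 2]])   # ground-truth label image
image = np.array([[0, 1], [2, 2]])   # predicted label image
cm_norm = normalised_confusion_matrix(label, image)
print(cm_norm.sum(axis=1))           # -> [1. 1. 1.]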
Code Example #8
def spei(
    precips_mm: np.ndarray,
    pet_mm: np.ndarray,
    scale: int,
    distribution: Distribution,
    periodicity: compute.Periodicity,
    data_start_year: int,
    calibration_year_initial: int,
    calibration_year_final: int,
    fitting_params: dict = None,
) -> np.ndarray:
    """
    Compute SPEI (Standardized Precipitation Evapotranspiration Index), fitted
    to either a gamma or a Pearson Type III distribution.

    PET values are subtracted from the precipitation values to come up with an array
    of (P - PET) values, which is then scaled to the specified months scale and
    finally fitted/transformed to SPEI values corresponding to the input
    precipitation time series.

    :param precips_mm: an array of monthly total precipitation values,
        in millimeters, should be of the same size as the input PET array
    :param pet_mm: an array of monthly PET values, in millimeters,
        should be of the same size as the input precipitation array
    :param scale: the number of months over which the values should be scaled
        before computing the indicator
    :param distribution: distribution type to be used for the internal
        fitting/transform computation
    :param periodicity: the periodicity of the time series represented by the
        input data, valid/supported values are 'monthly' and 'daily'
        'monthly' indicates an array of monthly values, assumed to span full
         years, i.e. the first value corresponds to January of the initial year
         and any missing final months of the final year filled with NaN values,
         with size == # of years * 12
         'daily' indicates an array of full years of daily values with 366 days
         per year, as if each year were a leap year and any missing final months
         of the final year filled with NaN values, with array size == (# years * 366)
    :param data_start_year: the initial year of the input datasets (assumes that
        the two inputs cover the same period)
    :param calibration_year_initial: initial year of the calibration period
    :param calibration_year_final: final year of the calibration period
    :param fitting_params: optional dictionary of pre-computed distribution
        fitting parameters, if the distribution is gamma then this dict should
        contain two arrays, keyed as "alphas" and "betas", and if the
        distribution is Pearson then this dict should contain four arrays keyed
        as "probabilities_of_zero", "locs", "scales", and "skews"
    :return: an array of SPEI values
    :rtype: numpy.ndarray of type float, of the same size and shape as the input
        PET and precipitation arrays
    """

    # if we're passed all missing values then we can't compute anything,
    # so we return the same array of missing values
    if (np.ma.is_masked(precips_mm) and precips_mm.mask.all()) \
            or np.all(np.isnan(precips_mm)):
        return precips_mm

    # validate that the two input arrays are compatible
    if precips_mm.size != pet_mm.size:
        message = "Incompatible precipitation and PET arrays"
        _logger.error(message)
        raise ValueError(message)

    # clip any negative values to zero
    if np.amin(precips_mm) < 0.0:
        _logger.warning(
            "Input contains negative values -- all negatives clipped to zero")
        precips_mm = np.clip(precips_mm, a_min=0.0, a_max=None)

    # subtract the PET from precipitation, adding an offset
    # to ensure that all values are positive
    p_minus_pet = (precips_mm.flatten() - pet_mm.flatten()) + 1000.0

    # remember the original length of the input array, in order to facilitate
    # returning an array of the same size
    original_length = precips_mm.size

    # get a sliding sums array, with each element's value
    # scaled by the specified number of time steps
    scaled_values = compute.sum_to_scale(p_minus_pet, scale)

    if distribution is Distribution.gamma:

        # get (optional) fitting parameters if provided
        if fitting_params is not None:
            alphas = fitting_params["alphas"]
            betas = fitting_params["betas"]
        else:
            alphas = None
            betas = None

        # fit the scaled values to a gamma distribution and
        # transform to corresponding normalized sigmas
        transformed_fitted_values = \
            compute.transform_fitted_gamma(
                scaled_values,
                data_start_year,
                calibration_year_initial,
                calibration_year_final,
                periodicity,
                alphas,
                betas,
            )

    elif distribution is Distribution.pearson:

        # get (optional) fitting parameters if provided
        if fitting_params is not None:
            probabilities_of_zero = fitting_params["probabilities_of_zero"]
            locs = fitting_params["locs"]
            scales = fitting_params["scales"]
            skews = fitting_params["skews"]
        else:
            probabilities_of_zero = None
            locs = None
            scales = None
            skews = None

        # fit the scaled values to a Pearson Type III distribution
        # and transform to corresponding normalized sigmas
        transformed_fitted_values = \
            compute.transform_fitted_pearson(
                scaled_values,
                data_start_year,
                calibration_year_initial,
                calibration_year_final,
                periodicity,
                probabilities_of_zero,
                locs,
                scales,
                skews,
            )

    else:
        message = "Unsupported distribution argument: " + \
                  "{dist}".format(dist=distribution)
        _logger.error(message)
        raise ValueError(message)

    # clip values to within the valid range, reshape the array back to 1-D
    values = \
        np.clip(transformed_fitted_values,
                _FITTED_INDEX_VALID_MIN,
                _FITTED_INDEX_VALID_MAX).flatten()

    # return the original size array
    return values[0:original_length]
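
A hypothetical call sketch; `Distribution` and `compute.Periodicity` are the objects already referenced in the function body, and the synthetic data are illustrative only:

# Hypothetical call with 30 years of synthetic monthly data (values in mm).
import numpy as np

n_years = 30
rng = np.random.default_rng(42)
precips = rng.gamma(shape=2.0, scale=40.0, size=n_years * 12)
pet = rng.gamma(shape=2.0, scale=35.0, size=n_years * 12)

spei_3month = spei(
    precips_mm=precips,
    pet_mm=pet,
    scale=3,                                   # 3-month accumulation
    distribution=Distribution.gamma,
    periodicity=compute.Periodicity.monthly,
    data_start_year=1981,
    calibration_year_initial=1981,
    calibration_year_final=2010,
)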
Code Example #9
def prediction_intervall_plot(y: np.ndarray,
                              y_hat: np.ndarray,
                              title: str = '') -> Tuple[mpl.figure.Figure, np.ndarray]:
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(7, 3), gridspec_kw={'width_ratios': [3, 5]})

    y_flat = y.flatten()

    # only take part of y to have a better zoom-in
    x_bnd = np.arange(0, 1400)
    y_len = len(x_bnd)

    # hydrograph:
    y_r = [0, 0, 0, 0, 0, 0]  # used later for probability-plot
    quantiles = [0.95, 0.9, 0.80, 0.50, 0.20, 0.1]
    labels_and_colors = {
        'labels': [
            '2.5-97.5 percentile', '05-95 percentile', '10-90 percentile', '25-75 percentile', '40-60 percentile',
            '45-55 percentile'
        ],
        'colors': ['#FDE725', '#8FD744', '#35B779', '#21908C', '#31688E', '#443A83']
    }
    for idx in range(len(quantiles)):
        lb = round(50 - (quantiles[idx] * 100) / 2)
        ub = round(50 + (quantiles[idx] * 100) / 2)
        y_lb = np.percentile(y_hat[x_bnd, :, :], lb, axis=-1).flatten()
        y_ub = np.percentile(y_hat[x_bnd, :, :], ub, axis=-1).flatten()
        y_r[idx] = np.sum(((y_flat[x_bnd] > y_lb) * (y_flat[x_bnd] < y_ub))) / y_len
        if idx <= 3:
            axs[1].fill_between(x_bnd,
                                y_lb,
                                y_ub,
                                color=labels_and_colors['colors'][idx],
                                label=labels_and_colors['labels'][idx])

    y_median = np.median(y_hat, axis=-1).flatten()
    axs[1].plot(x_bnd, y_median[x_bnd], '-', color='red', label="median")
    axs[1].plot(x_bnd, y_flat[x_bnd], '--', color='black', label="observed")
    axs[1].legend(prop={'size': 5})

    # probability-plot:
    axs[0].plot([0, 1], [0, 1], 'k--')
    for idx in range(1, len(quantiles) - 1):
        # move description out of the way:
        is_quantile_small = quantiles[idx] <= 0.5
        ha_argument = 'right' if is_quantile_small else 'left'
        text_pos = 1 if is_quantile_small else 0
        l_coord = [text_pos, quantiles[idx]] if is_quantile_small else [quantiles[idx], text_pos]

        axs[0].plot(l_coord, [y_r[idx], y_r[idx]], ':', color='#ffb95a')
        axs[0].text(text_pos,
                    y_r[idx],
                    f'{round(y_r[idx], 2)}',
                    fontsize=8,
                    va='center',
                    ha=ha_argument,
                    c='#ffb95a',
                    backgroundcolor='w')

    axs[0].plot(quantiles, y_r, 'ro-')
    axs[0].set_axisbelow(True)
    axs[0].yaxis.grid(color='#ECECEC', linestyle='dashed')
    axs[0].xaxis.grid(color='#ECECEC', linestyle='dashed')
    axs[0].xaxis.set_ticks(np.arange(0, 1, 0.2))
    axs[0].yaxis.set_ticks(np.arange(0, 1, 0.2))
    axs[0].set_xlabel("prediction intervals")
    axs[0].set_ylabel("obs in quantiles")
    axs[0].set_title(title)

    fig.tight_layout()

    return fig, axs
Code Example #10
File: part_b.py Project: dtch1997/AA203-Homework
 def Q(self, val: np.ndarray):
     val = val.flatten()
     self.estimator.C[:self.state_dim**2] = val.reshape(-1, 1)
Code Example #11
File: spline_fxns.py Project: neurodata/brainlit
def speed(
    x: np.ndarray,
    t: np.ndarray,
    c: np.ndarray,
    k: np.integer,
    aux_outputs: bool = False,
) -> np.ndarray:
    r"""Compute the speed of a B-Spline.

    The speed is the norm of the first derivative of the B-Spline.

    Arguments:
        x: A `1xL` array of parameter values where to evaluate the curve.
            It contains the parameter values where the speed of the B-Spline will
            be evaluated. It is required to be non-empty, one-dimensional, and
            real-valued.
        t: A `1xm` array representing the knots of the B-spline.
            It is required to be a non-empty, non-decreasing, and one-dimensional
            sequence of real-valued elements. For a B-Spline of degree `k`, at least
            `2k + 1` knots are required.
        c: A `dxn` array representing the coefficients/control points of the B-spline.
            Given `n` real-valued, `d`-dimensional points ::math::`x_k = (x_k(1),...,x_k(d))`,
            `c` is the non-empty matrix which columns are ::math::`x_1^T,...,x_N^T`. For a
            B-Spline of order `k`, `n` cannot be less than `m-k-1`.
        k: A non-negative integer representing the degree of the B-spline.
        aux_outputs: If True, also return the first derivative used to compute the speed.
            Defaults to False.

    Returns:
        speed: A `1xL` array containing the speed of the B-Spline evaluated at `x`

    References:
    .. [1] Kouba, Parametric Equations.
        https://www.math.ucdavis.edu/~kouba/Math21BHWDIRECTORY/ArcLength.pdf
    """

    # convert arguments to desired type
    x = np.ascontiguousarray(x)
    t = np.ascontiguousarray(t)
    c = np.ascontiguousarray(c)
    k = operator.index(k)

    if k < 0:
        raise ValueError("The order of the spline must be non-negative")

    check_type(t, np.ndarray)
    t_dim = t.ndim
    if t_dim != 1:
        raise ValueError("t must be one-dimensional")
    if len(t) == 0:
        raise ValueError("t must be non-empty")
    check_iterable_type(t, (np.integer, float))
    if (np.diff(t) < 0).any():
        raise ValueError("t must be a non-decreasing sequence")

    check_type(c, np.ndarray)
    c_dim = c.ndim
    if c_dim > 2:
        raise ValueError("c must be 2D max")
    if len(c.flatten()) == 0:
        raise ValueError("c must be non-empty")
    if c_dim == 1:
        check_iterable_type(c, (np.integer, float))
        # expand dims so that we can cycle through a single dimension
        c = np.expand_dims(c, axis=0)
    if c_dim == 2:
        for d in c:
            check_iterable_type(d, (np.integer, float))
    n_dim = len(c)

    check_type(x, np.ndarray)
    x_dim = x.ndim
    if x_dim != 1:
        raise ValueError("x must be one-dimensional")
    if len(x) == 0:
        raise ValueError("x must be non-empty")
    check_iterable_type(x, (np.integer, float))
    L = len(x)

    # evaluate first and second derivatives
    # deriv, dderiv are (d, L) arrays
    deriv = np.empty((n_dim, L))
    for i, dim in enumerate(c):
        spl = BSpline(t, dim, k)
        deriv[i, :] = spl.derivative(nu=1)(x) if k - 1 >= 0 else np.zeros(L)
    # transpose deriv
    deriv = deriv.T

    speed = np.linalg.norm(deriv, axis=1)
    if not aux_outputs:
        return speed
    else:
        return speed, deriv
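
A usage sketch with knots and coefficients produced by `scipy.interpolate.splprep`; the validators used above (`check_type`, `check_iterable_type`) are assumed to be available in the surrounding module:

# Usage sketch: speed of a cubic B-Spline fitted to a helix segment.
import numpy as np
from scipy.interpolate import splprep

u = np.linspace(0.0, 1.0, 50)
coords = [np.cos(2 * np.pi * u), np.sin(2 * np.pi * u), u]   # 3-D helix samples
(t, c, k), _ = splprep(coords, s=0)                          # knots, coefficients, degree (k=3)
x = np.linspace(0.0, 1.0, 200)
s = speed(x, t, np.asarray(c), k)   # roughly constant for this near-uniform parameterization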
Code Example #12
def calc_fdc_fms(obs: np.ndarray,
                 sim: np.ndarray,
                 m1: float = 0.2,
                 m2: float = 0.7) -> float:
    """[summary]
    
    Parameters
    ----------
    obs : np.ndarray
        Array containing the discharge observations
    sim : np.ndarray
        Array containing the discharge simulations
    m1 : float, optional
        Lower bound of the middle section. Has to be in the range (0, 1), by default 0.2
    m2 : float, optional
        Upper bound of the middle section. Has to be in the range (0, 1), by default 0.7
    
    Returns
    -------
    float
        Bias of the middle slope of the flow duration curve (Yilmaz 2018).
    
    Raises
    ------
    RuntimeError
        If `obs` and `sim` don't have the same length
    RuntimeError
        If `m1` is not in range(0,1)
    RuntimeError
        If `m2` is not in range(0,1)
    RuntimeError
        If `m1` >= `m2`
    """
    # make sure that metric is calculated over the same dimension
    obs = obs.flatten()
    sim = sim.flatten()

    if obs.shape != sim.shape:
        raise RuntimeError("obs and sim must be of the same length.")

    if (m1 <= 0) or (m1 >= 1):
        raise RuntimeError("m1 has to be in the range (0,1)")

    if (m2 <= 0) or (m2 >= 1):
        raise RuntimeError("m1 has to be in the range (0,1)")

    if m1 >= m2:
        raise RuntimeError("m1 has to be smaller than m2")

    # for numerical reasons change 0s to 1e-6
    sim[sim == 0] = 1e-6
    obs[obs == 0] = 1e-6

    # sort both in descending order
    obs = -np.sort(-obs)
    sim = -np.sort(-sim)

    # calculate fms part by part
    qsm1 = np.log(sim[np.round(m1 * len(sim)).astype(int)] + 1e-6)
    qsm2 = np.log(sim[np.round(m2 * len(sim)).astype(int)] + 1e-6)
    qom1 = np.log(obs[np.round(m1 * len(obs)).astype(int)] + 1e-6)
    qom2 = np.log(obs[np.round(m2 * len(obs)).astype(int)] + 1e-6)

    fms = ((qsm1 - qsm2) - (qom1 - qom2)) / (qom1 - qom2 + 1e-6)

    return fms * 100
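
A minimal usage sketch with synthetic discharge series (the numbers are illustrative only):

# Usage sketch: percent bias of the FDC midsegment slope for synthetic series.
import numpy as np

rng = np.random.default_rng(0)
obs = rng.lognormal(mean=1.0, sigma=0.8, size=1000)
sim = obs * rng.normal(loc=1.0, scale=0.1, size=1000)

fms_percent_bias = calc_fdc_fms(obs, sim, m1=0.2, m2=0.7)
print(f"FMS bias: {fms_percent_bias:.1f}%")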
Code Example #13
File: reinforce.py Project: Limmen/gym-idsgame
    def get_action(self,
                   state: np.ndarray,
                   attacker: bool = True,
                   legal_actions: List = None,
                   non_legal_actions: List = None) -> Union[int, torch.Tensor]:
        """
        Samples an action from the policy network

        :param state: the state to sample an action for
        :param attacker: boolean flag whether running in attacker mode (if false assume defender)
        :param legal_actions: list of allowed actions
        :param non_legal_actions: list of disallowed actions
        :return: The sampled action id and the log-probability of the sampled action
        """
        if self.config.lstm_network:
            state = torch.from_numpy(
                state.reshape(1, state.shape[0],
                              state.shape[1] * state.shape[2])).float()
        else:
            state = torch.from_numpy(state.flatten()).float()

        # Move to GPU if using GPU
        if torch.cuda.is_available() and self.config.gpu:
            device = torch.device("cuda:" + str(self.config.gpu_id))
            state = state.to(device)

        # Calculate legal actions
        if attacker:
            actions = list(range(self.env.num_attack_actions))
            if not self.env.local_view_features() or (
                    legal_actions is None or non_legal_actions is None):
                legal_actions = list(
                    filter(lambda action: self.env.is_attack_legal(action),
                           actions))
                non_legal_actions = list(
                    filter(lambda action: not self.env.is_attack_legal(action),
                           actions))
        else:
            actions = list(range(self.env.num_defense_actions))
            legal_actions = list(
                filter(lambda action: self.env.is_defense_legal(action),
                       actions))
            non_legal_actions = list(
                filter(lambda action: not self.env.is_defense_legal(action),
                       actions))

        # Forward pass using the current policy network to predict P(a|s)
        if attacker:
            action_probs = self.attacker_policy_network(state).squeeze()
            # Set probability of non-legal actions to 0
            action_probs_1 = action_probs.clone()
            if len(legal_actions) > 0 and len(
                    non_legal_actions) < self.env.num_attack_actions:
                action_probs_1[non_legal_actions] = 0
        else:
            action_probs = self.defender_policy_network(state).squeeze()
            # Set probability of non-legal actions to 0
            action_probs_1 = action_probs.clone()
            # print("state shape:{}".format(state.shape))
            # print("action shape:{}".format(action_probs_1.shape))
            if len(legal_actions) > 0 and len(
                    non_legal_actions) < self.env.num_defense_actions:
                action_probs_1[non_legal_actions] = 0

        # Use torch.distributions package to create a parameterizable probability distribution of the learned policy
        # PG uses a trick to turn the gradient into a stochastic gradient which we can sample from in order to
        # approximate the true gradient (which we can’t compute directly). It can be seen as an alternative to the
        # reparameterization trick
        policy_dist = Categorical(action_probs_1)

        # Sample an action from the probability distribution
        try:
            action = policy_dist.sample()
        except Exception as e:
            print(
                "Nan values in distribution, consider using a lower learnign rate or gradient clipping"
            )
            print("legal actions: {}".format(legal_actions))
            print("non_legal actions: {}".format(non_legal_actions))
            print("action_probs: {}".format(action_probs))
            print("action_probs_1: {}".format(action_probs_1))
            print("state: {}".format(state))
            print("policy_dist: {}".format(policy_dist))
            action = torch.tensor(0).type(torch.LongTensor)

        # log_prob returns the log of the probability density/mass function evaluated at value.
        # save the log_prob as it will use later on for computing the policy gradient
        # policy gradient theorem says that the stochastic gradient of the expected return of the current policy is
        # the log gradient of the policy times the expected return, therefore we save the log of the policy distribution
        # now and use it later to compute the gradient once the episode has finished.
        log_prob = policy_dist.log_prob(action)

        return action.item(), log_prob
Code Example #14
    def _mask_negative_weights(self, weights: np.ndarray) -> np.ndarray:
        """
        Find all relevant weights whose values are negative and mask those values with optimization
        constraints fixed to the interval [0, 0].

        Parameters
        ----------
        weights : np.ndarray
            Flattened weights array

        Returns
        -------
        np.ndarray
            Indices of masked values.
        """

        num_weights = len(weights)

        # --------------------------------------------------------------------------------
        # check calculated weights
        # if either param a or param b < 0, the second distribution's parameter is fixed to zero
        # the logistic fit is repeated afterwards with remaining distribution

        # first step: extract weights without bias and get all weights below 0

        # on detection or binary classification, only face the confidence weights
        if self.detection or self._is_binary_classification():
            weights = weights[1:3]

            # on multiclass classification, face all weights (without biases)
        else:
            weights = weights[self.num_classes:]

        # now check if negative entries are present
        masked_weights = np.where((weights.flatten() < 0))[0]

        # weights below 0 found?
        # we need to make sure that not both weights are masked out for a single class
        if len(masked_weights):

            # on detection or binary classification, only keep the confidence dimension monotonically increasing
            # this is equivalent to the first dimension (weights 0 and 1)
            if self.detection or self._is_binary_classification():
                if len(masked_weights) == 2:
                    masked_weights = np.array([masked_weights[0]])

            # same on multiclass classification but for each class
            else:
                for cls in range(self.num_classes):
                    index_a = 2 * cls
                    index_b = 2 * cls + 1

                    # remove index of the second weight for this certain class from the masked weights array
                    if index_a in masked_weights and index_b in masked_weights:
                        masked_weights = np.delete(
                            masked_weights,
                            np.argwhere(masked_weights == index_b))

            # increase indices for optimization routine due to bias values
            masked_weights += 1 if self.detection or self._is_binary_classification(
            ) else self.num_classes

        return masked_weights
Code Example #15
    def update(
        self, eta: np.ndarray, P: np.ndarray, z: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray]:
        """Update eta and P with z, associating landmarks and adding new ones.

        Parameters
        ----------
        eta : np.ndarray
            [description]
        P : np.ndarray
            [description]
        z : np.ndarray, shape=(#detections, 2)
            [description]

        Returns
        -------
        Tuple[np.ndarray, np.ndarray, float, np.ndarray]
            [description]
        """
        numLmk = (eta.size - 3) // 2
        assert (len(eta) - 3) % 2 == 0, "EKFSLAM.update: landmark length not even"

        if numLmk > 0:
            # Prediction and innovation covariance
            zpred = self.h(eta)
            H = self.H(eta)

            # Here you can use simply np.kron (a bit slow) to form the big (very big in VP after a while) R,
            # or be smart with indexing and broadcasting (3d indexing into 2d mat) realizing you are adding the same R on all diagonals
            
            R_repeated =  np.diag(np.diagonal(ml.repmat(self.R, numLmk, numLmk)))
     
            S = H @ P @ H.T + R_repeated
            assert (
                S.shape == zpred.shape * 2
            ), "EKFSLAM.update: wrong shape on either S or zpred"
            z = z.ravel()  # 2D -> flat

            # Perform data association
            za, zpred, Ha, Sa, a = self.associate(z, zpred, H, S)

            # No association could be made, so skip update
            if za.shape[0] == 0:
                etaupd = eta 
                Pupd = P  
                NIS = 1 # TODO: beware this one when analysing consistency

            else:
                # Create the associated innovation
                v = za.ravel() - zpred  # za: 2D -> flat
                v[1::2] = utils.wrapToPi(v[1::2])

                # Kalman mean update
                S_cho_factors = la.cho_factor(Sa) # Optional, used in places for S^-1, see scipy.linalg.cho_factor and scipy.linalg.cho_solve
                W = la.cho_solve(S_cho_factors, Ha @ P.T).T # Kalman gain
                etaupd = eta + W @ v # Kalman update

                # Kalman cov update: use Joseph form for stability
                jo = -W @ Ha
                jo[np.diag_indices(jo.shape[0])] += 1  # same as adding Identity mat
                Pupd = jo @ P # Kalman update. This is the main workload on VP after speedups

                # calculate NIS, can use S_cho_factors
                NIS = v.T @ la.cho_solve(S_cho_factors, v)

                # When tested, remove for speed
                assert np.allclose(Pupd, Pupd.T), "EKFSLAM.update: Pupd not symmetric"
                assert np.all(
                    np.linalg.eigvals(Pupd) > 0
                ), "EKFSLAM.update: Pupd not positive definite"

        else:  # All measurements are new landmarks,
            a = np.full(z.shape[0], -1)
            z = z.flatten()
            NIS = 1 # TODO: beware this one, you can change the value to for instance 1
            etaupd = eta
            Pupd = P

        # Create new landmarks if any is available
        if self.do_asso:
            is_new_lmk = a == -1
            if np.any(is_new_lmk):
                z_new_inds = np.empty_like(z, dtype=bool)
                z_new_inds[::2] = is_new_lmk
                z_new_inds[1::2] = is_new_lmk
                z_new = z[z_new_inds]
                etaupd, Pupd = self.add_landmarks(etaupd, Pupd, z_new)# Add new landmarks.

        assert np.allclose(Pupd, Pupd.T), "EKFSLAM.update: Pupd must be symmetric"
        assert np.all(np.linalg.eigvals(Pupd) >= 0), "EKFSLAM.update: Pupd must be PSD"

        return etaupd, Pupd, NIS, a
Code Example #16
def binimage2bitstream(bin_image: np.ndarray):
    # bin_image is a numpy int array consists of only 1s and 0s
    # input follows thermal printer's mechanism: 1 is black (printed) and 0 is white (left untouched)
    assert bin_image.max() <= 1 and bin_image.min() >= 0
    return _pack_block(''.join(map(str, bin_image.flatten())))
Code Example #17
def plot_gp_dist(
    ax,
    samples: np.ndarray,
    x: np.ndarray,
    plot_samples=True,
    palette="Reds",
    fill_alpha=0.8,
    samples_alpha=0.1,
    fill_kwargs=None,
    samples_kwargs=None,
):
    """A helper function for plotting 1D GP posteriors from trace

    Parameters
    ----------
    ax: axes
        Matplotlib axes.
    samples: numpy.ndarray
        Array of S posterior predictive samples from a GP.
        Expected shape: (S, X)
    x: numpy.ndarray
        Grid of X values corresponding to the samples.
        Expected shape: (X,) or (X, 1), or (1, X)
    plot_samples: bool
        Plot the GP samples along with posterior (defaults True).
    palette: str
        Palette for coloring output (defaults to "Reds").
    fill_alpha: float
        Alpha value for the posterior interval fill (defaults to 0.8).
    samples_alpha: float
        Alpha value for the sample lines (defaults to 0.1).
    fill_kwargs: dict
        Additional arguments for posterior interval fill (fill_between).
    samples_kwargs: dict
        Additional keyword arguments for samples plot.

    Returns
    -------

    ax: Matplotlib axes
    """
    import matplotlib.pyplot as plt

    if fill_kwargs is None:
        fill_kwargs = {}
    if samples_kwargs is None:
        samples_kwargs = {}
    if np.any(np.isnan(samples)):
        warnings.warn(
            "There are `nan` entries in the [samples] arguments. "
            "The plot will not contain a band!",
            UserWarning,
        )

    cmap = plt.get_cmap(palette)
    percs = np.linspace(51, 99, 40)
    colors = (percs - np.min(percs)) / (np.max(percs) - np.min(percs))
    samples = samples.T
    x = x.flatten()
    for i, p in enumerate(percs[::-1]):
        upper = np.percentile(samples, p, axis=1)
        lower = np.percentile(samples, 100 - p, axis=1)
        color_val = colors[i]
        ax.fill_between(x,
                        upper,
                        lower,
                        color=cmap(color_val),
                        alpha=fill_alpha,
                        **fill_kwargs)
    if plot_samples:
        # plot a few samples
        idx = np.random.randint(0, samples.shape[1], 30)
        ax.plot(x,
                samples[:, idx],
                color=cmap(0.9),
                lw=1,
                alpha=samples_alpha,
                **samples_kwargs)

    return ax
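
A short usage sketch with synthetic posterior samples of shape (S, X), as documented above:

# Usage sketch: fan chart of 200 synthetic GP-like samples on a 100-point grid.
import numpy as np
import matplotlib.pyplot as plt

x = np.linspace(0.0, 10.0, 100)
samples = np.sin(x)[None, :] + 0.1 * np.random.randn(200, 100)   # (S, X)

fig, ax = plt.subplots()
plot_gp_dist(ax, samples, x)
plt.show()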
Code Example #18
 def get_batched_features(items: np.ndarray, df_val: Union[int, float], attr_f: Union[str, Callable], dtype=None):
     if isinstance(attr_f, str):
         _local_attr_str = str(attr_f)
         attr_f = lambda x: getattr(x, _local_attr_str)
     # --
     flattened_vals = BK.input_tensor([df_val if z is None else attr_f(z) for z in items.flatten()], dtype=dtype)
     ret = flattened_vals.view(BK.get_shape(items))
     return ret
Code Example #19
File: np2.py Project: yulkang/2D_Decision
def cell2mat(c: np.ndarray, dtype=float) -> np.ndarray:
    # convert from object to numeric
    shape0 = c.shape
    vs = np.stack([v.astype(dtype) for v in c.flatten()])
    return np.reshape(vs, shape0 + vs[0].shape)
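
A small usage sketch with a MATLAB-style cell array of equally shaped numeric vectors:

# Usage sketch: a 2x2 object array whose cells are length-3 vectors -> shape (2, 2, 3).
import numpy as np

c = np.empty((2, 2), dtype=object)
for i in range(2):
    for j in range(2):
        c[i, j] = np.full(3, i + j)

mat = cell2mat(c)
print(mat.shape)   # -> (2, 2, 3)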
Code Example #20
def uncertainty_plot(y: np.ndarray, y_hat: np.ndarray, title: str = '') -> Tuple[mpl.figure.Figure, np.ndarray]:
    """Plots probability plot alongside a hydrograph with simulation percentiles.
    
    The probability plot itself is analogous to the calibration plot for classification tasks. The plot compares the 
    theoretical percentiles of the estimated conditional distributions (over time) with the respective relative 
    empirical counts. 
    The probability plot is often also referred to as probability integral transform diagram, Q-Q plot, or predictive 
    Q-Q plot. 
    

    Parameters
    ----------
    y : np.ndarray
        Array of observed values.
    y_hat : np.ndarray
        Array of simulated values.
    title : str, optional
        Title of the plot, by default empty.

    Returns
    -------
    Tuple[mpl.figure.Figure, np.ndarray]
        The uncertainty plot.
    """

    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(6.5, 3), gridspec_kw={'width_ratios': [4, 5]})

    # only take part of y to have a better zoom-in
    y_long = y.flatten()
    y_hat_long = y_hat.reshape(y_long.shape[0], -1)
    x_bnd = np.arange(0, 400)
    y_bnd_len = len(x_bnd)

    # hydrograph:
    y_r = [0, 0, 0, 0, 0, 0]  # used later for probability-plot
    quantiles = [0.9, 0.80, 0.50, 0.20, 0.1]
    labels_and_colors = {
        'labels': ['05-95 PI', '10-90 PI', '25-75 PI', '40-60 PI', '45-55 PI'],
        'colors': ['#FDE725', '#8FD744', '#21908C', '#31688E', '#443A83']
    }
    for idx in range(len(quantiles)):
        lb = round(50 - (quantiles[idx] * 100) / 2)
        ub = round(50 + (quantiles[idx] * 100) / 2)
        y_lb = np.percentile(y_hat_long[x_bnd, :], lb, axis=-1).flatten()
        y_ub = np.percentile(y_hat_long[x_bnd, :], ub, axis=-1).flatten()
        y_r[idx] = np.sum(((y_long[x_bnd] > y_lb) * (y_long[x_bnd] < y_ub))) / y_bnd_len
        if idx <= 3:
            axs[1].fill_between(x_bnd,
                                y_lb,
                                y_ub,
                                color=labels_and_colors['colors'][idx],
                                label=labels_and_colors['labels'][idx])

    y_median = np.median(y_hat_long, axis=-1).flatten()
    axs[1].plot(x_bnd, y_median[x_bnd], '-', color='red', label="median")
    axs[1].plot(x_bnd, y_long[x_bnd], '--', color='black', label="observed")
    axs[1].legend(prop={'size': 5})
    axs[1].set_ylabel("runoff")
    axs[1].set_xlabel("time index")
    # probability-plot:
    quantiles = np.arange(0, 101, 5)
    y_r = quantiles * 0.0
    for idx in range(len(y_r)):
        ub = quantiles[idx]
        y_ub = np.percentile(y_hat_long[x_bnd, :], ub, axis=-1).flatten()
        y_r[idx] = np.sum(y_long[x_bnd] < y_ub) / y_bnd_len

    axs[0].plot([0, 1], [0, 1], 'k--')
    axs[0].plot(quantiles / 100, y_r, 'ro', ms=3.0)
    axs[0].set_axisbelow(True)
    axs[0].yaxis.grid(color='#ECECEC', linestyle='dashed')
    axs[0].xaxis.grid(color='#ECECEC', linestyle='dashed')
    axs[0].xaxis.set_ticks(np.arange(0, 1, 0.2))
    axs[0].yaxis.set_ticks(np.arange(0, 1, 0.2))
    axs[0].set_xlabel("theoretical quantile frequency")
    axs[0].set_ylabel("count")

    fig.suptitle(title, fontsize=14)
    fig.tight_layout(rect=[0, 0.1, 1, 0.95])

    return fig, axs
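
A usage sketch with a synthetic ensemble; the function zooms in on the first 400 time steps, so the inputs should cover at least that many:

# Usage sketch: 500 time steps of observations and a 100-member ensemble.
import numpy as np

rng = np.random.default_rng(1)
y = rng.lognormal(mean=1.0, sigma=0.5, size=500)
y_hat = y[:, None] * rng.normal(loc=1.0, scale=0.15, size=(500, 100))

fig, axs = uncertainty_plot(y, y_hat, title="synthetic ensemble")
fig.savefig("uncertainty_plot.png", dpi=150)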
Code Example #21
def draw_histogram(img: np.ndarray, ax):
    level = int(2**(8 * img.nbytes / img.size))
    ax.hist(img.flatten(), bins=level, density=True, histtype='stepfilled')
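
A usage sketch; for a uint8 image the computed `level` is 2**(8 * 1) = 256 bins:

# Usage sketch: per-intensity density histogram of an 8-bit grayscale image.
import numpy as np
import matplotlib.pyplot as plt

img = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
fig, ax = plt.subplots()
draw_histogram(img, ax)
plt.show()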
Code Example #22
File: rfr_imputator.py Project: chrinide/SMAC3
    def impute(self, censored_X: np.ndarray, censored_y: np.ndarray,
               uncensored_X: np.ndarray,
               uncensored_y: np.ndarray) -> typing.Optional[np.ndarray]:
        """
        Imputes censored runs and returns new y values.

        Parameters
        ----------
        censored_X : np.ndarray [N, M]
            Feature array of all censored runs.
        censored_y : np.ndarray [N, 1]
            Target values for all censored runs.
        uncensored_X : np.ndarray [N, M]
            Feature array of all non-censored runs.
        uncensored_y : np.ndarray [N, 1]
            Target values for all non-censored runs.

        Returns
        ----------
        imputed_y : np.ndarray
            Same shape as censored_y [N, 1]
        """
        if censored_X.shape[0] == 0:
            self.logger.critical("Nothing to impute, return None")
            return None

        censored_y = censored_y.flatten()
        uncensored_y = uncensored_y.flatten()

        # first learn model without censored data
        self.model.train(uncensored_X, uncensored_y)

        self.logger.debug("Going to impute %d y-values with %s" %
                          (censored_X.shape[0], str(self.model)))

        imputed_y = None  # define this, if imputation fails

        # Define variables
        y = np.empty(
            (0, )
        )  # This only defines the type, the actual value will not be used later on.

        it = 1
        change = 0

        while True:
            self.logger.debug("Iteration %d of %d" % (it, self.max_iter))

            # predict censored y values
            y_mean, y_var = self.model.predict(censored_X)
            assert y_var is not None  # please mypy

            y_var[y_var < self.var_threshold] = self.var_threshold
            y_stdev = np.sqrt(y_var)[:, 0]
            y_mean = y_mean[:, 0]

            # ignore the warnings of truncnorm.stats
            # since we handle them appropriately
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    'ignore',
                    r'invalid value encountered in (subtract|true_divide|power).*'
                )
                warnings.filterwarnings(
                    'ignore',
                    r'divide by zero encountered in (true_divide|log).*')
                imputed_y = truncnorm.stats(a=(censored_y - y_mean) / y_stdev,
                                            b=(self.threshold - y_mean) /
                                            y_stdev,
                                            loc=y_mean,
                                            scale=y_stdev,
                                            moments='m')

            imputed_y = np.array(imputed_y)

            nans = ~np.isfinite(imputed_y)
            n_nans = sum(nans)
            if n_nans > 0:
                # Replace all nans with maximum of predicted perf and censored value
                # this happens if the prediction is far smaller than the
                # censored data point
                self.logger.debug("Going to replace %d nan-value(s) with "
                                  "max(captime, predicted mean)" % n_nans)
                imputed_y[nans] = np.max([censored_y[nans], y_mean[nans]],
                                         axis=0)

            if it > 1:
                # Calc mean difference between imputed values this and last
                # iteration, assume imputed values are always concatenated
                # after uncensored values

                change = np.mean(
                    np.abs(imputed_y - y[uncensored_y.shape[0]:]) /
                    y[uncensored_y.shape[0]:])

            # lower all values that are higher than threshold
            # should probably never happen
            imputed_y[imputed_y >= self.threshold] = self.threshold

            self.logger.debug("Change: %f" % change)

            X = np.concatenate((uncensored_X, censored_X))
            y = np.concatenate((uncensored_y, imputed_y))

            if change > self.change_threshold or it == 1:
                self.model.train(X, y)
            else:
                break

            it += 1
            if it > self.max_iter:
                break

        self.logger.debug("Imputation used %d/%d iterations, last_change=%f" %
                          (it - 1, self.max_iter, change))

        # replace all y > cutoff with PAR10 values (i.e., threshold)
        imputed_y = np.array(imputed_y, dtype=float)
        imputed_y[imputed_y >= self.cutoff] = self.threshold

        if not np.isfinite(imputed_y).all():
            self.logger.critical("Imputed values are not finite, %s" %
                                 str(imputed_y))
        return np.reshape(imputed_y, [imputed_y.shape[0], 1])
Code Example #23
File: test_rpc_utils.py Project: MilanSas/FPS-Simple
def generate_uncompressed_proto_obs(in_array: np.ndarray) -> ObservationProto:
    obs_proto = ObservationProto()
    obs_proto.float_data.data.extend(in_array.flatten().tolist())
    obs_proto.compression_type = NONE
    obs_proto.shape.extend(in_array.shape)
    return obs_proto
Code Example #24
File: files.py Project: cxz/datapane
 def write(self, x: ndarray) -> DPTmpFile:
     f: Figure = x.flatten()[0].get_figure()
     return super()._write_figure(f)
Code Example #25
def solve_normal_interference(
        interference: float,
        gap: np.ndarray,
        model: _ContactModelABC,
        current_state: dict,
        adhesive_pressure: typing.Union[float, typing.Callable] = None,
        contact_nodes: np.ndarray = None,
        max_iter: int = 100,
        tol: float = 1e-4,
        initial_guess_loads: np.ndarray = None,
        material_options: dict = None,
        remove_percent: float = 0.5,
        node_thresh_percent: float = 0.01):
    """Solves contact with set normal interference

    Parameters
    ----------
    interference: float
        The interference between the surfaces measured from the point of first contact
    gap: np.ndarray
        The undeformed gap between the surfaces at the moment of first contact
    model: _ContactModelABC
        A contact model object containing the surfaces
    current_state: dict
        The state dict for the model before this step is solved
    adhesive_pressure: {float, Callable}, optional (None)
        The maximum adhesive force between the two surfaces, or a callable which will be called as follows:
        adhesive_force(surface_loads, deformed_gap, contact_nodes, model) and must return two boolean arrays
        containing the nodes to be removed and the nodes to be added in the iteration, plus a flag forcing
        another iteration.
    contact_nodes: np.ndarray
        Boolean array of the surface nodes in contact at the start of the calculation; if set, loading will be
        confined to these nodes
    material_options: dict
        Dict of options to be passed to the loads_from_surface_displacement method of the first surface
    max_iter: int
        The maximum number of iterations to find a stable set of contact nodes
    tol: float
        The tolerance on the solution
    initial_guess_loads: np.ndarray
        The initial guess for the loads, used in the optimisation step, must be the same shape as the gap array
        if this is not supplied the materials are used to generate an initial guess, this is often less accurate than
        using the previous time step, especially when the time step is short
    remove_percent: float
        The percentage of the current contact nodes which can be removed in a single iteration
    node_thresh_percent: float
        Percentage of contact nodes which need to be added before another iteration is forced; below this
        threshold the solution is considered converged

    Returns
    -------
    loads_z: array
        A named tuple of surface loads
    total_displacement_z: array
        A named tuple of the total displacement
    surface_1_displacement_z: array
        A named tuple of the displacement on surface 1
    surface_2_displacement_z: array
        A named tuple of the displacement on surface 2
    contact_nodes: np.ndarray
        A boolean array of nodes in contact
    failed: bool
        False if the solution converged

    Notes
    -----

    """

    surf_1 = model.surface_1
    surf_2 = model.surface_2

    material_options = material_options or dict()

    if contact_nodes is None and adhesive_pressure is not None:
        warnings.warn(
            'Contact nodes not set from previous step; results may show unphysical adhesion force. Use a '
            'no adhesion step to initialise the contact nodes to avoid this behaviour.'
        )

    if adhesive_pressure is None:
        adhesive_pressure = 0

    z = interference - gap  # necessary displacement for completely touching, positive is into surfaces

    if contact_nodes is None:
        contact_nodes = z > 0

    if not np.any(contact_nodes.flatten()):
        print('no_contact_nodes')
        zeros = np.zeros_like(z)
        return (zeros, zeros.copy(), zeros.copy(), zeros.copy(), contact_nodes,
                False)

    if isinstance(surf_1.material, _IMMaterial) and isinstance(
            surf_2.material, _IMMaterial):
        raise ValueError("Use the height optimiser function")
    # if not both influence matrix based materials
    displacements = z.copy()
    displacements[np.logical_not(contact_nodes)] = np.nan

    it_num = 0
    added_nodes_last_it = np.inf
    failed = False

    while True:

        loads, disp_tup = surf_1.material.loads_from_surface_displacement(
            displacements=displacements,
            grid_spacing=surf_1.grid_spacing,
            other=surf_2.material,
            current_state=current_state,
            **material_options)

        # find deformed nd_gap and add contacting nodes to the contact nodes
        deformed_gap = gap - interference + disp_tup[
            0].z  # the nd_gap minus the interference plus the displacement

        force_another_iteration = False
        n_contact_nodes = sum(contact_nodes.flatten())

        print('Total contact nodes:', n_contact_nodes)

        if isinstance(adhesive_pressure, Number):
            nodes_to_remove = np.logical_and(loads.z < adhesive_pressure,
                                             contact_nodes)
            nodes_to_add = np.logical_and(deformed_gap < 0,
                                          np.logical_not(contact_nodes))
            print('Nodes to add: ', sum(nodes_to_add.flatten()))
            # noinspection PyUnresolvedReferences
            print('Nodes to remove raw: ', sum(nodes_to_remove.flatten()))

            max_remove = int(
                min(n_contact_nodes * remove_percent,
                    0.5 * added_nodes_last_it))
            # noinspection PyUnresolvedReferences
            if sum(nodes_to_remove.flatten()) > max_remove:
                nodes_to_remove = np.argpartition(-loads.z.flatten(),
                                                  -max_remove)[-max_remove:]
                nodes_to_remove = np.unravel_index(nodes_to_remove,
                                                   contact_nodes.shape)
                print('Nodes to remove treated: ', len(nodes_to_remove[0]))
                print('Forcing another iteration')
                force_another_iteration = True
        else:
            nodes_to_remove, nodes_to_add, force_another_iteration = adhesive_pressure(
                loads, deformed_gap, contact_nodes, model)

        node_thresh = n_contact_nodes * node_thresh_percent

        if force_another_iteration or any(nodes_to_remove.flatten()) or sum(
                nodes_to_add.flatten()) > node_thresh:
            contact_nodes[nodes_to_add] = True
            contact_nodes[nodes_to_remove] = False
            n_nodes_added = sum(nodes_to_add.flatten())
            added_nodes_last_it = n_nodes_added if n_nodes_added else added_nodes_last_it  # if any nodes then update
            displacements = z.copy()
            displacements[np.logical_not(contact_nodes)] = np.nan

        else:
            break

        it_num += 1

        if it_num > max_iter:
            warnings.warn(
                'Solution failed to converge on a set of contact nodes while solving for normal interference'
            )
            loads.z[:] = np.nan
            failed = True
            break

    return (loads, ) + disp_tup + (contact_nodes, failed)
Code Example #26
 def __init__(self, weights: np.ndarray):
     self._weights = weights.flatten()
     self.dim = self._weights.shape[0]
Code Example #27
 def update_points(self, ids: np.ndarray, points: np.ndarray) -> None:
     _, (idx_1, idx_2) = snp.intersect(self.ids.flatten(),
                                       ids.flatten(),
                                       indices=True)
     self._points[idx_1] = points[idx_2]
Code Example #28
File: meta_analysis.py Project: MICA-MNI/BrainStat
def meta_analytic_decoder(
    template: str,
    stat_labels: np.ndarray,
    data_dir: Optional[Union[str, Path]] = None,
):
    """Meta-analytic decoding of surface maps using NeuroSynth or NeuroQuery.

    Parameters
    ----------
    template : str
        Path of a template volume file.
    stat_labels : str, numpy.ndarray, sequence of str or numpy.ndarray
        Path to a label file for the surfaces, numpy array containing the
        labels, or a list containing multiple of the aforementioned.
    data_dir : str, optional
        The directory of the dataset. If none exists, a new dataset will
        be downloaded and saved to this path. If None, the directory defaults to
        your home directory, by default None.


    Returns
    -------
    pandas.DataFrame
        Table with correlation values for each feature.
    """
    data_dir = Path(
        data_dir) if data_dir else data_directories["NEUROSYNTH_DATA_DIR"]
    data_dir.mkdir(exist_ok=True, parents=True)

    logger.info(
        "Fetching Neurosynth feature files. This may take several minutes if you haven't downloaded them yet."
    )
    feature_files = tuple(_fetch_precomputed(data_dir, database="neurosynth"))

    mni152 = nib.load(
        tflow.get("MNI152Lin", resolution=2, desc="brain", suffix="mask"))

    stat_nii = _surf2vol(template, stat_labels.flatten())
    mask = (stat_nii.get_fdata() != 0) & (mni152.get_fdata() != 0)
    stat_vector = stat_nii.get_fdata()[mask]

    feature_names = []
    correlations = np.zeros(len(feature_files))

    logger.info("Running correlations with all Neurosynth features.")
    for i in range(len(feature_files)):
        feature_names.append(
            re.search("__[A-Za-z0-9 ]+",
                      feature_files[i].stem)[0][2:])  # type: ignore
        feature_data = nib.load(feature_files[i]).get_fdata()[mask]
        keep = np.logical_not(
            np.isnan(feature_data)
            | np.isinf(feature_data)
            | np.isnan(stat_vector)
            | np.isinf(stat_vector))
        correlations[i], _ = pearsonr(stat_vector[keep], feature_data[keep])

    df = pd.DataFrame(correlations,
                      index=feature_names,
                      columns=["Pearson's r"])
    return df.sort_values(by="Pearson's r", ascending=False)
Code Example #29
    def _calc_u_at_h(
        self,
        u_in: ndarray,
        h_in: ndarray,
        hhere: ndarray,
        mask: ndarray,
        dolog: bool = False,
    ) -> ndarray:
        """Function to interpolate u_in on h_in at hhere.

        Args:
            u_in:
                3D array float32 - velocity on h_in layer, last dim is height
            h_in:
                3D or 1D array float32 - height layer array
            hhere:
                2D array float32 - height grid to interpolate at
            mask:
                2D array of bools - mask the final result for uath
            dolog:
                if True, log interpolation, default False

        Returns:
            2D array float32 - velocity interpolated at h
        """
        u_in = np.ma.masked_less(u_in, 0.0)
        h_in = np.ma.masked_less(h_in, 0.0)
        # h_in.mask = u_in.mask
        # If I allow 1D height grids, I think I cannot do the hop over.

        # Ignore the height at positions where u_in is RMDI ("hop over" them)
        hhere = np.ma.masked_less(hhere, 0.0)
        upidx = np.argmax(h_in > hhere[:, :, np.newaxis], axis=2)
        # loidx = np.maximum(upidx-1, 0) #if RMDI, need below
        loidx = np.argmin(np.ma.masked_less(hhere[:, :, np.newaxis] - h_in,
                                            0.0),
                          axis=2)

        if h_in.ndim == 3:
            hup = h_in.take(upidx.flatten() +
                            np.arange(0, upidx.size *
                                      h_in.shape[2], h_in.shape[2]))
            hlow = h_in.take(loidx.flatten() +
                             np.arange(0, loidx.size *
                                       h_in.shape[2], h_in.shape[2]))
        elif h_in.ndim == 1:
            hup = h_in[upidx].flatten()
            hlow = h_in[loidx].flatten()
        # pylint: disable=unsubscriptable-object
        uup = u_in.take(upidx.flatten() +
                        np.arange(0, upidx.size *
                                  u_in.shape[2], u_in.shape[2]))
        # pylint: disable=unsubscriptable-object
        ulow = u_in.take(loidx.flatten() +
                         np.arange(0, loidx.size *
                                   u_in.shape[2], u_in.shape[2]))
        mask = mask.flatten()
        uath = np.full(mask.shape, RMDI, dtype=np.float32)
        if dolog:
            uath[mask] = self._interpolate_log(hup[mask], hlow[mask],
                                               hhere.flatten()[mask],
                                               uup[mask], ulow[mask])
        else:
            uath[mask] = self._interpolate_1d(hup[mask], hlow[mask],
                                              hhere.flatten()[mask], uup[mask],
                                              ulow[mask])
        uath = np.reshape(uath, hhere.shape)
        return uath
Code Example #30
 def get_gradient_negative_log_marginal_likelihood_from_array(
         params: np.ndarray):
     params = params.flatten()
     return self.get_gradient_negative_log_marginal_likelihood(*params)
Code Example #31
File: spline_fxns.py Project: neurodata/brainlit
def torsion(
    x: np.ndarray,
    t: np.ndarray,
    c: np.ndarray,
    k: np.integer,
    aux_outputs: bool = False,
) -> np.ndarray:
    r"""Compute the torsion of a B-Spline.

    The torsion measures the failure of a curve, `r(u)`, to be planar.
    If the curvature `k` of a curve is not zero, then the torsion is defined as

    .. math::

        \tau = -n \cdot b',

    where `n` is the principal normal vector, and `b'` the derivative w.r.t. the
    arc length `s` of the binormal vector.

    The torsion can also be computed as

    .. math::
        \tau = \lvert r'(t), r''(t), r'''(t) \rvert / \lVert r'(t) \times r''(t) \rVert^2,

    where `r(t)` is the position vector as a function of the curve parameter `t`.

    Arguments:
        x: A `1xL` array of parameter values where to evaluate the curve.
            It contains the parameter values where the torsion of the B-Spline will
            be evaluated. It is required to be non-empty, one-dimensional, and
            real-valued.
        t: A `1xm` array representing the knots of the B-spline.
            It is required to be a non-empty, non-decreasing, and one-dimensional
            sequence of real-valued elements. For a B-Spline of degree `k`, at least
            `2k + 1` knots are required.
        c: A `dxn` array representing the coefficients/control points of the B-spline.
            Given `n` real-valued, `d`-dimensional points :math:`x_k = (x_k(1),...,x_k(d))`,
            `c` is the non-empty matrix whose columns are :math:`x_1^T,...,x_n^T`. For a
            B-Spline of degree `k`, `n` cannot be less than `m-k-1`.
        k: A non-negative integer representing the degree of the B-spline.
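        aux_outputs: If True, also return the first, second, and third
            derivatives of the curve evaluated at `x`. Defaults to False.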

    Returns:
        torsion: A `1xL` array containing the torsion of the B-Spline evaluated at `x`

    References:
    .. [1] Máté Attila, The Frenet–Serret formulas.
        http://www.sci.brooklyn.cuny.edu/~mate/misc/frenet_serret.pdf
    """

    # convert arguments to desired type
    x = np.ascontiguousarray(x)
    t = np.ascontiguousarray(t)
    c = np.ascontiguousarray(c)
    k = operator.index(k)

    if k < 0:
        raise ValueError("The order of the spline must be non-negative")

    check_type(t, np.ndarray)
    t_dim = t.ndim
    if t_dim != 1:
        raise ValueError("t must be one-dimensional")
    if len(t) == 0:
        raise ValueError("t must be non-empty")
    check_iterable_type(t, (np.integer, float))
    if (np.diff(t) < 0).any():
        raise ValueError("t must be a non-decreasing sequence")

    check_type(c, np.ndarray)
    c_dim = c.ndim
    if c_dim > 2:
        raise ValueError("c must be 2D max")
    if len(c.flatten()) == 0:
        raise ValueError("c must be non-empty")
    if c_dim == 1:
        check_iterable_type(c, (np.integer, float))
        # expand dims so that we can cycle through a single dimension
        c = np.expand_dims(c, axis=0)
    if c_dim == 2:
        for d in c:
            check_iterable_type(d, (np.integer, float))
    n_dim = len(c)

    check_type(x, np.ndarray)
    x_dim = x.ndim
    if x_dim != 1:
        raise ValueError("x must be one-dimensional")
    if len(x) == 0:
        raise ValueError("x must be non-empty")
    check_iterable_type(x, (np.integer, float))
    L = len(x)

    # evaluate first, second, and third derivatives
    # deriv, dderiv, ddderiv are (d, L) arrays
    deriv = np.empty((n_dim, L))
    dderiv = np.empty((n_dim, L))
    ddderiv = np.empty((n_dim, L))
    for i, dim in enumerate(c):
        spl = BSpline(t, dim, k)
        deriv[i, :] = spl.derivative(nu=1)(x) if k - 1 >= 0 else np.zeros(L)
        dderiv[i, :] = spl.derivative(nu=2)(x) if k - 2 >= 0 else np.zeros(L)
        ddderiv[i, :] = spl.derivative(nu=3)(x) if k - 3 >= 0 else np.zeros(L)
    # transpose derivs
    deriv = deriv.T
    dderiv = dderiv.T
    ddderiv = ddderiv.T

    cross = np.cross(deriv, dderiv)

    # Could be more efficient by only computing dot products of corresponding rows
    num = np.diag((cross @ ddderiv.T))
    denom = np.linalg.norm(cross, axis=1)**2

    torsion = np.nan_to_num(num / denom)

    if aux_outputs:
        return torsion, deriv, dderiv, ddderiv
    else:
        return torsion
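
As a quick check on the determinant form of the torsion used above, the following standalone sketch (not taken from the brainlit project; the helix radius `a` and pitch `c` are made up) evaluates the same formula with the analytic derivatives of a circular helix, whose torsion is the constant c / (a**2 + c**2):

# Torsion of the helix r(t) = (a*cos t, a*sin t, c*t) via the triple-product formula.
import numpy as np

a, c = 2.0, 0.5
t = np.linspace(0.0, 4.0 * np.pi, 50)

# analytic first, second, and third derivatives, stacked as (L, 3) arrays
r1 = np.stack([-a * np.sin(t), a * np.cos(t), np.full_like(t, c)], axis=1)
r2 = np.stack([-a * np.cos(t), -a * np.sin(t), np.zeros_like(t)], axis=1)
r3 = np.stack([a * np.sin(t), -a * np.cos(t), np.zeros_like(t)], axis=1)

cross = np.cross(r1, r2)
num = np.einsum("ij,ij->i", cross, r3)        # scalar triple product, row by row
denom = np.linalg.norm(cross, axis=1) ** 2
tau = num / denom

assert np.allclose(tau, c / (a ** 2 + c ** 2))
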
Code example #32
File: features.py Project: fmcc/mss_layout_analysis
def prepare_features(arr: np.ndarray):
    """ Prepare an FFT matrix for use as a set of features. """
    return arr.flatten()
Code example #33
def spi(
    values: np.ndarray,
    scale: int,
    distribution: Distribution,
    data_start_year: int,
    calibration_year_initial: int,
    calibration_year_final: int,
    periodicity: compute.Periodicity,
    fitting_params: Dict = None,
) -> np.ndarray:
    """
    Computes SPI (Standardized Precipitation Index).

    :param values: 1-D numpy array of precipitation values, in any units,
        first value assumed to correspond to January of the initial year if
        the periodicity is monthly, or January 1st of the initial year if daily
    :param scale: number of time steps over which the values should be scaled
        before the index is computed
    :param distribution: distribution type to be used for the internal
        fitting/transform computation
    :param data_start_year: the initial year of the input precipitation dataset
    :param calibration_year_initial: initial year of the calibration period
    :param calibration_year_final: final year of the calibration period
    :param periodicity: the periodicity of the time series represented by the
        input data, valid/supported values are 'monthly' and 'daily'
        'monthly' indicates an array of monthly values, assumed to span full
         years, i.e. the first value corresponds to January of the initial year
         and any missing final months of the final year filled with NaN values,
         with size == # of years * 12
         'daily' indicates an array of full years of daily values with 366 days
         per year, as if each year were a leap year and any missing final months
         of the final year filled with NaN values, with array size == (# years * 366)
    :param fitting_params: optional dictionary of pre-computed distribution
        fitting parameters, if the distribution is gamma then this dict should
        contain two arrays, keyed as "alpha" and "beta", and if the
        distribution is Pearson then this dict should contain four arrays keyed
        as "prob_zero", "loc", "scale", and "skew"
    :return: SPI values fitted to the specified distribution at the specified
        time step scale, unitless
    :rtype: 1-D numpy.ndarray of floats of the same length as the input array
        of precipitation values
    """

    # we expect to operate upon a 1-D array, so if we've been passed a 2-D array
    # then we flatten it, otherwise raise an error
    shape = values.shape
    if len(shape) == 2:
        values = values.flatten()
    elif len(shape) != 1:
        message = "Invalid shape of input array: {shape}".format(shape=shape) + \
                  " -- only 1-D and 2-D arrays are supported"
        _logger.error(message)
        raise ValueError(message)

    # if we're passed all missing values then we can't compute
    # anything, so we return the same array of missing values
    if (np.ma.is_masked(values) and values.mask.all()) or np.all(
            np.isnan(values)):
        return values

    # clip any negative values to zero
    if np.amin(values) < 0.0:
        _logger.warning(
            "Input contains negative values -- all negatives clipped to zero")
        values = np.clip(values, a_min=0.0, a_max=None)

    # remember the original length of the array, in order to facilitate
    # returning an array of the same size
    original_length = values.size

    # get a sliding sums array, with each time step's value scaled
    # by the specified number of time steps
    values = compute.sum_to_scale(values, scale)

    # reshape precipitation values to (years, 12) for monthly,
    # or to (years, 366) for daily
    if periodicity is compute.Periodicity.monthly:

        values = utils.reshape_to_2d(values, 12)

    elif periodicity is compute.Periodicity.daily:

        values = utils.reshape_to_2d(values, 366)

    else:

        raise ValueError("Invalid periodicity argument: %s" % periodicity)

    if distribution is Distribution.gamma:

        # get (optional) fitting parameters if provided
        if fitting_params is not None:
            alphas = fitting_params["alpha"]
            betas = fitting_params["beta"]
        else:
            alphas = None
            betas = None

        # fit the scaled values to a gamma distribution
        # and transform to corresponding normalized sigmas
        values = compute.transform_fitted_gamma(
            values,
            data_start_year,
            calibration_year_initial,
            calibration_year_final,
            periodicity,
            alphas,
            betas,
        )
    elif distribution is Distribution.pearson:

        # get (optional) fitting parameters if provided
        if fitting_params is not None:
            probabilities_of_zero = fitting_params["prob_zero"]
            locs = fitting_params["loc"]
            scales = fitting_params["scale"]
            skews = fitting_params["skew"]
        else:
            probabilities_of_zero = None
            locs = None
            scales = None
            skews = None

        # fit the scaled values to a Pearson Type III distribution
        # and transform to corresponding normalized sigmas
        values = compute.transform_fitted_pearson(
            values,
            data_start_year,
            calibration_year_initial,
            calibration_year_final,
            periodicity,
            probabilities_of_zero,
            locs,
            scales,
            skews,
        )

    else:

        message = "Unsupported distribution argument: " + \
                  "{dist}".format(dist=distribution)
        _logger.error(message)
        raise ValueError(message)

    # clip values to within the valid range, reshape the array back to 1-D
    values = np.clip(values, _FITTED_INDEX_VALID_MIN,
                     _FITTED_INDEX_VALID_MAX).flatten()

    # return the original size array
    return values[0:original_length]
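
The gamma branch above reduces to fitting a gamma distribution to the scaled precipitation totals and mapping each cumulative probability through the inverse standard normal. A minimal standalone sketch of that core transform (using scipy directly on synthetic data; it ignores the per-calendar-month fitting, the calibration window, and the zero-precipitation handling that the real compute module performs) could look like:

# Core of the SPI idea: gamma CDF of the scaled totals -> standard-normal z-scores.
# Synthetic data; not the climate_indices implementation.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
precip = rng.gamma(shape=2.0, scale=30.0, size=30 * 12)  # 30 years of monthly totals

scale = 3
sums = np.convolve(precip, np.ones(scale), mode="valid")  # 3-month sliding sums

alpha, loc, beta = stats.gamma.fit(sums, floc=0.0)        # fit gamma (location fixed at 0)
spi_values = stats.norm.ppf(stats.gamma.cdf(sums, a=alpha, loc=loc, scale=beta))

print(spi_values[:5])
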
Code example #34
    def add_predictions(self, predictions: np.ndarray):
        self.counter.update(predictions.flatten())
        if predictions.ndim == 2:
            self.buffer.extend(predictions)
        else:
            self.buffer.append(predictions)
Code example #35
def categorical_accuracy(pred: np.ndarray, label: np.ndarray) -> np.ndarray:
    pred_label = np.argmax(pred, axis=1)
    return (pred_label == label.flatten()).mean()
Code example #36
File: HackRF.py Project: zjywlive/urh
    def iq_to_bytes(samples: np.ndarray):
        arr = Array("B", 2 * len(samples), lock=False)
        numpy_view = np.frombuffer(arr, dtype=np.uint8)
        numpy_view[:] = samples.flatten(order="C")
        return arr
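
The snippet relies on the fact that a lock-free multiprocessing.Array exposes the buffer protocol, so a NumPy view can fill it without an element-by-element copy. A small self-contained illustration of that pattern (the sample values are made up; this is not the urh HackRF code path):

# Fill a shared multiprocessing.Array through a zero-copy NumPy view.
import numpy as np
from multiprocessing import Array

samples = np.array([[1, 2], [3, 4]], dtype=np.uint8)  # e.g. interleaved I/Q bytes

shared = Array("B", samples.size, lock=False)         # raw ctypes byte buffer
view = np.frombuffer(shared, dtype=np.uint8)          # NumPy view over the same memory
view[:] = samples.flatten(order="C")                  # writes land in `shared`

print(list(shared))   # [1, 2, 3, 4]
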
Code example #37
    def _flatten_image(self, image: np.ndarray) -> np.ndarray:
        # Flatten the image from (w, h, 3n) to (3n * w * h).
        image = image.transpose()
        assert image.shape[0] == self.channels, image.shape
        return image.flatten()
Code example #38
File: pitch.py Project: stjordanis/muspy
def from_pitch_representation(
    array: ndarray,
    resolution: int = DEFAULT_RESOLUTION,
    program: int = 0,
    is_drum: bool = False,
    use_hold_state: bool = False,
    default_velocity: int = 64,
) -> Music:
    """Decode pitch-based representation into a Music object.

    Parameters
    ----------
    array : ndarray
        Array in pitch-based representation to decode. Will be casted to
        integer if not of integer type.
    resolution : int
        Time steps per quarter note. Defaults to `muspy.DEFAULT_RESOLUTION`.
    program : int, optional
        Program number according to General MIDI specification [1].
        Acceptable values are 0 to 127. Defaults to 0 (Acoustic Grand
        Piano).
    is_drum : bool, optional
        A boolean indicating if it is a percussion track. Defaults to
        False.
    use_hold_state : bool
        Whether to use a special state for holds. Defaults to False.
    default_velocity : int
        Default velocity value to use when decoding. Defaults to 64.

    Returns
    -------
    :class:`muspy.Music` object
        Decoded Music object.

    """
    # Cast the array to integer
    if not np.issubdtype(array.dtype, np.integer):
        array = array.astype(int)

    # Find the note boundaries
    notes: List[Note] = []
    diff = np.diff(array.flatten(), prepend=-1, append=-1)
    boundaries = np.nonzero(diff)[0]

    # Decode pitches
    if use_hold_state:
        is_awaiting_hold = False
        for start, end in zip(boundaries[:-1], boundaries[1:]):
            # Skip rests
            if array[start] == 128:
                is_awaiting_hold = False
                continue

            # Hold
            if array[start] == 129:
                # Skip a hold that does not follow any pitch
                if not is_awaiting_hold:
                    continue

                notes[-1].duration += end - start
                is_awaiting_hold = False

            # Pitch
            else:
                note = Note(
                    time=start,
                    duration=end - start,
                    pitch=array[start],
                    velocity=default_velocity,
                )
                notes.append(note)
                is_awaiting_hold = True

    else:
        for start, end in zip(boundaries[:-1], boundaries[1:]):
            # Skip rests
            if array[start] == 128:
                continue

            # Pitch
            note = Note(
                time=start,
                duration=end - start,
                pitch=array[start],
                velocity=default_velocity,
            )
            notes.append(note)

    # Sort the notes
    notes.sort(key=attrgetter("time", "pitch", "duration", "velocity"))

    # Create the Track and Music objects
    track = Track(program=program, is_drum=is_drum, notes=notes)
    music = Music(resolution=resolution, tracks=[track])

    return music
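
The decoding above hinges on one trick: np.diff with sentinel values prepended and appended marks every index where the value changes, so consecutive boundary pairs delimit runs of a constant pitch. A tiny standalone illustration of that run-splitting step (the pitch array is made up and not tied to muspy; 128 marks a rest):

# Split an array into runs of equal values using diff + nonzero.
import numpy as np

array = np.array([60, 60, 60, 128, 128, 62, 62, 64])

diff = np.diff(array, prepend=-1, append=-1)
boundaries = np.nonzero(diff)[0]          # [0, 3, 5, 7, 8]

for start, end in zip(boundaries[:-1], boundaries[1:]):
    if array[start] == 128:               # skip rests
        continue
    print(f"pitch={array[start]}, time={start}, duration={end - start}")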