Example #1
    def update_line(self, line_fit: np.array, line_fitx: np.array,
                    line_fity: np.array):
        """
        Updates the line.
        :param line_fit: polynom of the current line
        :param line_fitx: x values of the current line
        """
        # update lane fit
        if line_fit.any():
            self.detected = True
            self.recent_fits.append(line_fit)
            if len(self.recent_fits) > N:
                self.recent_fits.pop(0)
            self.current_fit = line_fit
            self.best_fit = np.average(self.recent_fits, axis=0)
        else:
            self.detected = False

        # update line pxs
        if line_fitx.any():
            self.recent_xfitted.append(line_fitx)
            if len(self.recent_xfitted) > N:
                self.recent_xfitted.pop(0)
            self.bestx = np.average(self.recent_xfitted, axis=0)
            self.diffs = np.sum(self.bestx - line_fitx)

        # calc radius
        self.radius_of_curvature = crv.cal_rad(self.bestx, line_fity)
        self.line_base_pos = None

        self.allx = line_fitx
        self.ally = line_fity
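For context, a minimal sketch of the line-tracking class this method assumes. The attribute names are taken from the snippet itself, while N (the fit history length) and the initial values are assumptions:

import numpy as np

N = 10  # assumed history length; the snippet relies on a module-level N

class Line:
    """Minimal container for update_line above; attribute names inferred from the snippet."""
    def __init__(self):
        self.detected = False           # was the line found in the last frame?
        self.recent_fits = []           # last N polynomial fits
        self.recent_xfitted = []        # last N arrays of fitted x values
        self.current_fit = None         # most recent polynomial fit
        self.best_fit = None            # average over the recent fits
        self.bestx = None               # average over the recent x values
        self.diffs = 0.0                # summed difference between bestx and the latest x values
        self.radius_of_curvature = None
        self.line_base_pos = None
        self.allx = None                # x values of the current line
        self.ally = None                # y values of the current line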
Example #2
    def apply_plasticity(self, delta_t: float, yield_stress_model,
                         plasticity_criterion, mask_mesh: np.array):
        """
        Apply plasticity treatment if criterion is activated :

        - compute yield stress
        - tests plasticity criterion
        - compute plastic strain rate for plastic cells

        :param delta_t: time step
        :param yield_stress_model: model to compute the yield stress
        :param plasticity_criterion: model for the plasticity criterion
        :param mask_mesh: mask cells in projectile or target
        """
        # The method apply_plastic_corrector_on_deviatoric_stress_tensor modifies
        # the variable dev_stress_new and must therefore be called at the end of
        # the plasticity computation step, so that the elastic prediction is kept
        # for computing the plastic strain rate, cumulated plasticity, ...

        # 1) Compute yield stress
        self.cells.compute_yield_stress(yield_stress_model, mask_mesh)
        if mask_mesh.any() and self.cells.enriched.any():
            self.cells.compute_enriched_yield_stress(yield_stress_model)

        # 2) Get plastic cells (verification of the plasticity criterion)
        # Criterion is tested for both classical and enriched cells
        self._get_plastic_cells(plasticity_criterion, mask_mesh)

        # Get plastic cells either in projectile or in target
        mask = np.logical_and(mask_mesh, self.__plastic_cells)  # pylint: disable=assignment-from-no-return
        # 3) Plasticity treatment for classical plastic cells and left part of enriched cells
        self.cells.apply_plasticity(mask, delta_t)

        # 4) Plasticity treatment for enriched plastic cells (right part)
        self.cells.apply_plasticity_enr(mask_mesh, delta_t)
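The steps in the docstring amount to: evaluate a yield stress per cell, flag the cells whose equivalent stress exceeds it (restricted by the mask, as in the np.logical_and above), and correct only those. A minimal self-contained sketch of that masking pattern, with made-up values rather than the project's actual models:

import numpy as np

equivalent_stress = np.array([150.0, 80.0, 210.0, 95.0])  # per-cell equivalent stress, made up
yield_stress = np.full(4, 100.0)                          # per-cell yield stress, made up
mask_mesh = np.array([True, True, False, True])           # e.g. projectile cells only

# criterion is tested everywhere, but the treatment is restricted by the mask
plastic_cells = np.logical_and(mask_mesh, equivalent_stress > yield_stress)
print(plastic_cells)  # [ True False False False]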
Example #3
import numpy as np


def sample_discrete_distribution(bin_centres: np.array,
                                 bin_weights: np.array,
                                 size: int = 1) -> np.array:
    """Draw samples from a discrete distribution given by bin centres and bin weights."""
    if not bin_weights.any():
        return np.zeros(size)
    return np.random.choice(bin_centres, p=bin_weights, size=size)
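A short usage example; note that np.random.choice requires the weights to sum to 1:

import numpy as np

bin_centres = np.array([0.5, 1.5, 2.5])
bin_weights = np.array([0.2, 0.5, 0.3])  # must sum to 1
samples = sample_discrete_distribution(bin_centres, bin_weights, size=1000)
print(samples.mean())  # close to 0.2*0.5 + 0.5*1.5 + 0.3*2.5 = 1.6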
Example #4
import numpy as np
from scipy import integrate


def _scale_distribution(post: np.array, support: np.array) -> np.array:
    """Normalize the distribution so that its absolute values integrate to 1.

    :param post: Values of the KDE cross-section
    :param support: Support of the KDE cross-section
    :return: The scaled distribution
    """

    post[np.abs(post) < 1e-8] = 0  # Rule of thumb

    if post.any():  # If there is any value
        a = integrate.simps(y=np.abs(post),
                            x=support)  # Integrate the absolute values
        post *= 1 / a  # Scale the distribution

    return post
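A usage sketch, assuming integrate is scipy.integrate (note that scipy.integrate.simps was removed in newer SciPy releases in favour of simpson):

import numpy as np
from scipy import integrate

support = np.linspace(-3.0, 3.0, 201)
post = np.exp(-support ** 2)                     # unnormalized KDE-like cross-section
scaled = _scale_distribution(post.copy(), support)
print(integrate.simps(np.abs(scaled), support))  # ~1.0 after scaling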
Example #5
    def histogramThresholding(self, image: np.array,
                              hist: np.array = None) -> np.array:
        if hist is None:
            hist = self.histogram(image)

        bins = len(hist)
        histogramStart = 0
        while hist[histogramStart] < 5:
            histogramStart += 1

        histogramEnd = bins - 1
        while hist[histogramEnd] < 5:
            histogramEnd -= 1

        maxVal = 255.0
        histogramCenter = int(
            round(np.average(np.linspace(0, maxVal, bins), weights=hist)))
        left = np.sum(hist[histogramStart:histogramCenter])
        right = np.sum(hist[histogramCenter:histogramEnd + 1])

        while histogramStart < histogramEnd:
            if left > right:
                left -= hist[histogramStart]
                histogramStart += 1
            else:
                right -= hist[histogramEnd]
                histogramEnd -= 1
            calculatedCenter = int(round((histogramEnd + histogramStart) / 2))

            if calculatedCenter < histogramCenter:
                left -= hist[histogramCenter]
                right += hist[histogramCenter]
            elif calculatedCenter > histogramCenter:
                left += hist[histogramCenter]
                right -= hist[histogramCenter]

            histogramCenter = calculatedCenter

        imageCopy = image.copy()
        imageCopy[imageCopy > histogramCenter] = 255
        imageCopy[imageCopy <= histogramCenter] = 0  # "<=" so pixels equal to the threshold are binarized too
        imageCopy = imageCopy.astype(np.uint8)
        return imageCopy.reshape(image.shape)
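A hypothetical usage on a synthetic bimodal image; segmenter stands for whatever object owns this method and its histogram helper:

import numpy as np

rng = np.random.default_rng(0)
dark = rng.normal(60, 10, (64, 64))
bright = rng.normal(180, 10, (64, 64))
image = np.concatenate([dark, bright], axis=1).clip(0, 255)

binary = segmenter.histogramThresholding(image)  # hist is computed internally when omitted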
Example #6
import numpy as np
from numpy.random import uniform  # get_cdf is assumed to be defined elsewhere in the module


def it_sampling(
    pdf,
    num_samples: int = 1,
    lower_bd=-np.inf,
    upper_bd=np.inf,
    k: int = None,
    cdf_y: np.array = None,
    return_cdf: bool = False,
):
    """Sample from an arbitrary, un-normalized PDF.

    :param pdf: The probability density function (not necessarily normalized), float -> float; must take floats
     or ints as input and return floats as output.
    :param num_samples: The number of samples to be generated.
    :param lower_bd: Lower bound of the support of the pdf. This parameter allows one to manually establish cutoffs
     for the density.
    :param upper_bd: Upper bound of the support of the pdf.
    :param k: Number of grid points between lower_bd and upper_bd.
    :param cdf_y: Precomputed values of the CDF.
    :param return_cdf: Option to return the computed CDF values.
    :return: samples: An array of samples from the provided PDF, with support between lower_bd and upper_bd.
    """
    if k is None:
        k = 200  # Default number of grid points

    if cdf_y is None:
        cdf = get_cdf(pdf)  # CDF of the pdf
        cdf_y = cdf(np.linspace(lower_bd, upper_bd, k))  # CDF values

    if return_cdf:
        return cdf_y

    else:
        if cdf_y.any():
            seeds = uniform(0, 1, num_samples)  # Uniformly distributed seeds
            simple_samples = np.interp(x=seeds, xp=cdf_y, fp=pdf.x)  # Samples
        else:
            simple_samples = np.zeros(num_samples)  # Samples

        return simple_samples
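Leaving aside the unshown get_cdf helper (and the fact that pdf is expected to expose its support grid as pdf.x, as an interpolator would), the function implements inverse-transform sampling. A minimal self-contained sketch of that idea on a finite support:

import numpy as np

def inverse_transform_sample(pdf, lower_bd, upper_bd, num_samples=1, k=200):
    """Minimal inverse-transform sampling on a finite grid (illustrative sketch)."""
    x = np.linspace(lower_bd, upper_bd, k)
    cdf = np.cumsum(pdf(x))
    cdf = cdf / cdf[-1]                 # normalize so the CDF ends at 1
    seeds = np.random.uniform(0, 1, num_samples)
    return np.interp(seeds, cdf, x)     # invert the CDF by interpolation

samples = inverse_transform_sample(lambda v: np.exp(-v ** 2), -4.0, 4.0, num_samples=1000)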
Example #7
    def update_gradients(self,
                         predicted: np.array,
                         real: np.array,
                         input_center_word: int,
                         sampling_ids: np.array = None) -> None:
        """

        :param predicted: output of model, untreated
        :param real: kind of one-hot vector with all the context words having a one, normalized
        :param sampling_ids: ids for negative sampling, shouldn't be set if you used softmax in forward
        :param input_center_word: index of input center word, not a vector
        """

        if sampling_ids is not None and sampling_ids.any():
            real = real.reshape(predicted.shape)
            center_grad = np.matmul(
                self.embedding_matrix_context_grad[sampling_ids].T,
                (predicted - real))
            self.embedding_matrix_center_grad[sampling_ids] += center_grad.T

            context_grad = np.outer((predicted - real),
                                    self._center_embedding(input_center_word))
            self.embedding_matrix_context_grad[sampling_ids] += context_grad
            return None

        center_grad = np.matmul(self.embedding_matrix_context.T,
                                (predicted - real).reshape(
                                    self.vocabulary_size, 1))
        self.embedding_matrix_center_grad[
            input_center_word] += center_grad.reshape(self.hidden_size)

        context_grad = np.matmul(
            (predicted - real).reshape(1, self.vocabulary_size),
            self.embedding_matrix_center)
        self.embedding_matrix_context_grad[
            input_center_word] += context_grad.reshape(self.hidden_size)
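For context, a minimal sketch of the word2vec-style model this method assumes; the attribute names and shapes are inferred from the snippet, while the initialization is an assumption:

import numpy as np

class SkipGramModel:
    """Minimal container for update_gradients above (inferred, not the original class)."""
    def __init__(self, vocabulary_size: int, hidden_size: int):
        self.vocabulary_size = vocabulary_size
        self.hidden_size = hidden_size
        self.embedding_matrix_center = 0.01 * np.random.randn(vocabulary_size, hidden_size)
        self.embedding_matrix_context = 0.01 * np.random.randn(vocabulary_size, hidden_size)
        self.embedding_matrix_center_grad = np.zeros((vocabulary_size, hidden_size))
        self.embedding_matrix_context_grad = np.zeros((vocabulary_size, hidden_size))

    def _center_embedding(self, word_id: int) -> np.ndarray:
        # row of the center embedding matrix for a single word id
        return self.embedding_matrix_center[word_id]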
Example #8
import numpy as np


def inverse_cdf(x: np.array, y: np.array, percentile: float) -> float:
    """Return the x value at which the discretized CDF y first reaches the given percentile."""
    if not y.any():
        return np.inf
    return x[inverse_cdf_index(y, percentile)]
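The helper inverse_cdf_index is not shown; a plausible minimal version plus a usage example (an assumption, not the original code):

import numpy as np

def inverse_cdf_index(y: np.array, percentile: float) -> int:
    """Index of the first CDF value that reaches the given percentile (assumed behavior)."""
    return int(np.searchsorted(y, percentile))

x = np.linspace(0.0, 10.0, 101)
y = x / 10.0                   # a toy CDF over x
print(inverse_cdf(x, y, 0.5))  # 5.0, the median of the toy distribution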
Example #9
import multiprocessing

import numpy
import pandas
from joblib import Parallel, delayed
from scipy.stats import spearmanr
from sklearn.linear_model import LinearRegression


def collective_feature_efficiency(X: numpy.array,
                                  y: numpy.array,
                                  min_resid: float = 0.1,
                                  n_jobs: int = None) -> float:
    """
    Collective feature efficiency.
    C4 starts by identifying the feature with highest correlation to the output. 
    All examples with asmall residual value (|εi|≤0.1) after a linear fit between this feature and the target attribute are removed. 
    Then, the most correlated feature to the remaining data points is found and the previous process is repeated until all features have been analyzed or no example remains.

    Parameters
    ----------
    X : numpy.array
        2d-array with features columns.
    y : numpy.array
        Array of response values.
    min_resid : float, optional (default=0.1)
        Minimum residual value for observation remotion.        
    n_jobs : int or None, optional (default=None)
        The number of jobs to run in parallel. None means 1 unless. -1 means using all processors.

    Return
    ------
    float:
        The number of observations that put residuos lower than 0.1
        over total number of observations.
    """
    # check if is dataframe
    X = (X.values if isinstance(X, pandas.DataFrame) else X)

    # check if y is dataframe or series
    y = (y.values if isinstance(y, pandas.DataFrame)
         or isinstance(y, pandas.Series) else y)

    # initial parameters
    A = list(range(X.shape[1]))
    n = X.shape[0]
    mcol = X.shape[1]
    num_cores = multiprocessing.cpu_count()
    n_jobs = (1 if not n_jobs else
              (num_cores if
               (num_cores < n_jobs) or (n_jobs == -1) else n_jobs))

    def calculateCorr(x_j: numpy.array, y: numpy.array, j: int,
                      A: list) -> tuple:
        """Calculate absolute Spearman correlation for x_j in set A."""
        corr = (abs(spearmanr(x_j, y)[0]) if j in A else .0)

        return (j, corr)

    while A and X.any():
        pos_rho_list = Parallel(n_jobs=n_jobs)(
            delayed(calculateCorr)(X[:, j], y, j, A) for j in range(mcol))
        rho_list = [t[1] for t in sorted(pos_rho_list)]

        if sum(rho_list) == .0:
            break
        m = int(numpy.argmax(rho_list))
        A.remove(m)
        model = LinearRegression()
        x_j = X[:, m].reshape((-1, 1))
        y = y.reshape((-1, 1))
        model.fit(x_j, y)

        resid = y - model.predict(x_j)
        id_remove = abs(resid.flatten()) > min_resid
        X = X[id_remove, :]
        y = y[id_remove]

    return len(y) / n
Example #10
    def populate_explorer_with_points(self, point_index: np.array,
                                      linenames: np.array,
                                      point_times: np.array, beam: np.array,
                                      x: np.array, y: np.array, z: np.array,
                                      tvu: np.array, status: np.array,
                                      id: np.array):
        """
        Show the attributes for each point, where each point is in its own row.  All the inputs have the same length,
        equal to the number of points.

        Parameters
        ----------
        point_index
            point index for the points, corresponds to the index of the point in the 3dview selected points
        linenames
            multibeam file name that the points come from
        point_times
            time of the soundings/points
        beam
            beam number of the points
        x
            easting of the points
        y
            northing of the points
        z
            depth of the points
        tvu
            total vertical uncertainty of the points
        status
            rejected/amplitude/phase return qualifier of the points
        id
            data container that the points come from
        """

        self.setSortingEnabled(False)
        if self.mode != 'point':
            self.set_mode('point')
        self.clear_explorer_data()
        if z.any():
            converted_status = np.full(status.shape[0], '', dtype=object)
            converted_status[np.where(status == 0)[0]] = 'amplitude'
            converted_status[np.where(status == 1)[0]] = 'phase'
            converted_status[np.where(status == 2)[0]] = 'rejected'
            for cnt, idx in enumerate(point_index):
                next_row = self.rowCount()
                self.insertRow(next_row)
                self.setItem(next_row, 0, QtWidgets.QTableWidgetItem(str(idx)))
                self.setItem(next_row, 1,
                             QtWidgets.QTableWidgetItem(linenames[cnt]))
                formattedtime = datetime.fromtimestamp(
                    float(point_times[cnt]), tz=timezone.utc).strftime('%c')
                self.setItem(next_row, 2,
                             QtWidgets.QTableWidgetItem(str(formattedtime)))
                self.setItem(next_row, 3,
                             QtWidgets.QTableWidgetItem(str(int(beam[cnt]))))
                self.setItem(next_row, 4,
                             QtWidgets.QTableWidgetItem(str(x[cnt])))
                self.setItem(next_row, 5,
                             QtWidgets.QTableWidgetItem(str(y[cnt])))
                self.setItem(next_row, 6,
                             QtWidgets.QTableWidgetItem(str(round(z[cnt], 3))))
                self.setItem(
                    next_row, 7,
                    QtWidgets.QTableWidgetItem(str(round(tvu[cnt], 3))))
                self.setItem(
                    next_row, 8,
                    QtWidgets.QTableWidgetItem(str(converted_status[cnt])))
                self.setItem(next_row, 9,
                             QtWidgets.QTableWidgetItem(str(id[cnt])))
        self.setSortingEnabled(True)
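A hypothetical call populating the widget with three soundings; explorer and all the values are made up for illustration:

import numpy as np

explorer.populate_explorer_with_points(
    point_index=np.array([0, 1, 2]),
    linenames=np.array(['line_001.all'] * 3),
    point_times=np.array([1600000000.0, 1600000001.0, 1600000002.0]),
    beam=np.array([10, 11, 12]),
    x=np.array([403000.0, 403001.0, 403002.0]),
    y=np.array([4120000.0, 4120001.0, 4120002.0]),
    z=np.array([25.312, 25.498, 25.877]),
    tvu=np.array([0.12, 0.13, 0.11]),
    status=np.array([0, 1, 2]),  # amplitude, phase, rejected
    id=np.array(['container_0'] * 3))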