Code Example #1
def calc_metric(candidate_img_blurred: np.ndarray, candidate_kpts: np.ndarray, candidate_length: int,
                url_img_blurred: np.ndarray, url_kpts: np.ndarray, url_length: int, kpt_pos: tuple) -> float:
    candidate_kpts, url_kpts = serial.deserialize_keypoints(candidate_kpts), serial.deserialize_keypoints(url_kpts)
    m, n = kpt_pos[0], kpt_pos[1]
    if m == -1:  # no matching keypoint pair was found
        return 1
    candidate_img_blurred, url_img_blurred = image.adjust_size(candidate_img_blurred, url_img_blurred)
    c_x, c_y = candidate_img_blurred.shape
    u_x, u_y = url_img_blurred.shape
    # offset between the matched keypoints
    x, y = np.array(candidate_kpts[n].pt) - np.array(url_kpts[m].pt)
    if x + c_x < 0 or x + u_x < 0:
        return 1
    if y + c_y < 0 or y + u_y < 0:
        return 1
    candidate_img_blurred = image.align_text(candidate_img_blurred, (int(x), int(y)))
    candidate_img_blurred, url_img_blurred = candidate_img_blurred.astype(int), url_img_blurred.astype(int)
    img_diff = np.abs(candidate_img_blurred - url_img_blurred)
    divisor = max(candidate_img_blurred.size, url_img_blurred.size)
    # fraction of pixels whose intensities differ by more than 10
    diff = len(np.where(img_diff > 10)[0]) / float(divisor)
    return diff
Code Example #2
File: coordinate.py Project: TimothyHelton/strumenti
def pol2cart(pts: np.ndarray, degrees: bool = False) -> np.ndarray:
    """Convert polar or cylindrical coordinates to Cartesian coordinates.

    :param ndarray pts: array of polar points (rho, theta) or cylindrical \
        points (rho, theta, z)
    :param bool degrees: if True the input angles are given in degrees \
        (default: False)
    :returns: [x, y, (*z*)]
    :rtype: ndarray

    >>> pol2cart(np.array([[2**0.5, 45], [1, 90]]), degrees=True)
    array([[  1.00000000e+00,   1.00000000e+00],
           [  6.12323400e-17,   1.00000000e+00]])

    >>> pol2cart(np.array([[2**0.5, 45, 1], [1, 90, 2]]), degrees=True)
    array([[  1.00000000e+00,   1.00000000e+00,   1.00000000e+00],
           [  6.12323400e-17,   1.00000000e+00,   2.00000000e+00]])
    """
    dim = element_dimension(pts, [2, 3])

    if degrees:
        pts = pts.astype(float)
        pts[:, 1] = np.radians(pts[:, 1])

    x = pts[:, 0] * np.cos(pts[:, 1])
    y = pts[:, 0] * np.sin(pts[:, 1])

    if dim == 2:
        return np.c_[x, y]
    else:
        return np.c_[x, y, pts[:, 2]]
Code Example #3
File: sf_clustering.py Project: ArnaudPel/CamKifu
    def find_stones(self, img: np.ndarray, rs=0, re=gsize, cs=0, ce=gsize, **kwargs):
        """ The stones detection main algorithm, which is based on k-means pixel clustering.

        Note: the three colors (E, B, W) must be present in the image for this statistical method to work.

        Args:
            img: ndarray
                The Goban image.
            rs: int - inclusive
            re: int - exclusive
                Row start and end indexes. Can be used to restrain check to a subregion.
            cs: int - inclusive
            ce: int - exclusive
                Column start and end indexes. Can be used to restrain check to a subregion.
            kwargs:
                Allowing for keyword args enables multiple find methods to be called indifferently. See SfMeta.

        Returns stones: ndarray
            A matrix containing the detected stones in the desired subregion of the image,
            or None if the result could not be trusted or something failed.
        """
        if img.dtype != np.float32:  # `img.dtype is np.float32` compares against the type object and is always False
            img = img.astype(np.float32)
        ratios, centers = self.cluster_colors(img, rs=rs, re=re, cs=cs, ce=ce)
        stones = self.interpret_ratios(ratios, centers, r_start=rs, r_end=re, c_start=cs, c_end=ce)
        if not self.check_density(stones):
            return None  # don't trust this result
        return stones
Code Example #4
File: coordinate.py Project: TimothyHelton/strumenti
def sphere2cart(pts: np.ndarray, degrees: bool = False) -> np.ndarray:
    """Convert spherical coordinates to Cartesian coordinates.

    :param ndarray pts: array of spherical points (r, theta, phi), where \
        theta is the azimuthal angle and phi the polar (inclination) angle
    :param bool degrees: if True the input angles are given in degrees \
        (default: False)
    :returns: [x, y, z]
    :rtype: ndarray

    >>> sphere2cart(np.array([[1, 0, 90], [1, 90, 90]]), degrees=True)
    array([[  1.00000000e+00,   0.00000000e+00,   6.12323400e-17],
           [  6.12323400e-17,   1.00000000e+00,   6.12323400e-17]])
    """
    element_dimension(pts, 3)

    if degrees:
        pts = pts.astype(float)
        pts[:, 1:3] = np.radians(pts[:, 1:3])

    r = pts[:, 0]
    theta = pts[:, 1]
    phi = pts[:, 2]
    x = r * np.sin(phi) * np.cos(theta)
    y = r * np.sin(phi) * np.sin(theta)
    z = r * np.cos(phi)

    return np.c_[x, y, z]
Code Example #5
    def _apply_single(self, data: np.ndarray):
        """
        Read an image and return it as a floating point array.
        Byte and short arrays are rescaled to the range 0...1 (unsigned)
        or -1...1 (signed).
        :rtype: np.ndarray in range 0...1 (unsigned) or -1...1 (signed)
        """
        if data.dtype == np.dtype('uint8'):
            data = data / 255.0
        elif data.dtype == np.dtype('int8'):
            data = data / 127.0
        elif data.dtype == np.dtype('uint16'):
            data = data / 65535.0  # np.iinfo(np.uint16).max
        elif data.dtype == np.dtype('int16'):
            data = data / 32767.0
        elif data.dtype in [np.dtype('f'), np.dtype('float32'), np.dtype('float64')]:
            pass
        elif data.dtype == bool:
            data = data.astype(np.float32)
        else:
            raise Exception("unknown image type: {}".format(data.dtype))

        # collapse color images to a single grayscale channel
        if data.ndim == 3:
            data = np.mean(data, axis=2)

        return data, None
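
A quick sanity check of the dtype-dependent rescaling above (a standalone sketch of the same normalization, not the class method itself):

import numpy as np

img8 = np.array([[0, 128, 255]], dtype=np.uint8)
print(img8 / 255.0)       # -> [[0. 0.502 1.]] (approximately): uint8 mapped into 0...1

img16 = np.array([[-32767, 0, 32767]], dtype=np.int16)
print(img16 / 32767.0)    # -> [[-1. 0. 1.]]: int16 mapped into -1...1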
Code Example #6
File: pose.py Project: sylvaus/quaternion-sim
    def __init__(self,
                 orientation: Quaternion = Quaternion(),
                 position: ndarray = array([0, 0, 0], dtype=float)) -> None:

        self.orientation = orientation

        assert position.shape == (3,) or position.shape == (3, 1), \
            "The position should be an array of shape (3,) or (3, 1)"
        self.position = position.astype(dtype=float).reshape(3, 1)
Code Example #7
File: metadata.py Project: frank-y-liu/gatk
    def __init__(self,
                 sample_name: str,
                 ploidy_j: np.ndarray,
                 ploidy_genotyping_quality_j: np.ndarray,
                 contig_list: List[str],
                 check_germline_contig_ploidy_for_homo_sapiens: bool = True):
        assert ploidy_j.ndim == 1
        assert ploidy_j.size == len(contig_list)
        assert ploidy_genotyping_quality_j.ndim == 1
        assert ploidy_genotyping_quality_j.size == len(contig_list)

        self.sample_name = sample_name
        self.contig_list = contig_list
        self.ploidy_j = ploidy_j.astype(types.small_uint)
        self.ploidy_genotyping_quality_j = ploidy_genotyping_quality_j.astype(types.floatX)
        self._contig_map = {contig: j for j, contig in enumerate(contig_list)}

        if check_germline_contig_ploidy_for_homo_sapiens:
            self.check_germline_contig_ploidy_for_homo_sapiens()
Code Example #8
File: io.py Project: void42/svbrdf-renderer
def save_pfm_texture(filename: str, tex: np.ndarray):
    if tex.dtype != np.float32:
        print('Input is not 32 bit precision: converting to 32 bits.')
        tex = tex.astype(np.float32)
    height, width = tex.shape[0], tex.shape[1]
    with open(filename, 'wb+') as f:
        f.write('{}\n'.format(HEADER_MAGIC).encode())
        f.write('{} {}\n'.format(width, height).encode())
        f.write('-1.0\n'.encode())
        f.write(tex.tobytes())
Code Example #9
File: solids.py Project: sylvaus/quaternion-sim
    def __init__(self,
                 name: str,
                 pose: Pose = Pose(),
                 init_pose: Pose = Pose(),
                 vel: ndarray = array([0.0, 0.0, 0.0]),
                 ang_vel: ndarray = array([0.0, 0.0, 0.0]),
                 ref_frame: str = None,
                 mass: float = 1.0,
                 inertia: matrix = identity(3),
                 ambient_color: list = None,
                 diffuse_color: list = None):

        self.name = name
        self.pose = pose
        # the init pose is deep-copied to make sure it won't be modified involuntarily
        self.init_pose = cp.deepcopy(init_pose)

        self.vel = vel.astype(dtype=float).reshape(3, 1)
        self.ang_vel = ang_vel.astype(dtype=float).reshape(3, 1)

        if ref_frame is None:
            self.ref_frame = None
            self.frame = None

        else:
            self.ref_frame = ref_frame
            self.frame = Frame("frame_{0}".format(self.name),
                               self.pose,
                               self.ref_frame)

        self.mass = mass
        self.inertia = inertia

        if ambient_color is None:
            self.ambient_color = [0.0, 0.0, 0.0, 0.0]
        else:
            self.ambient_color = ambient_color

        if diffuse_color is None:
            self.diffuse_color = [0.0, 0.0, 0.0, 0.0]
        else:
            self.diffuse_color = diffuse_color
Code Example #10
 def convert_to_feature(
         self,
         spectrogram: numpy.ndarray,
         acoustic_feature: AcousticFeature,
 ):
     acoustic_feature = acoustic_feature.astype_only_float(numpy.float64)
     f_out = AcousticFeature(
         f0=acoustic_feature.f0,
         spectrogram=spectrogram.astype(numpy.float64),
         aperiodicity=acoustic_feature.aperiodicity,
         mfcc=acoustic_feature.mfcc,
         voiced=acoustic_feature.voiced,
     )
     return f_out
Code Example #11
 def convert_to_audio(
         self,
         input: numpy.ndarray,
         acoustic_feature: AcousticFeature,
         sampling_rate: int,
 ):
     acoustic_feature = acoustic_feature.astype_only_float(numpy.float64)
     out = pyworld.synthesize(
         f0=acoustic_feature.f0.ravel(),
         spectrogram=input.astype(numpy.float64),
         aperiodicity=acoustic_feature.aperiodicity,
         fs=sampling_rate,
         frame_period=self._param.acoustic_feature_param.frame_period,
     )
     return Wave(out, sampling_rate=sampling_rate)
Code Example #12
File: metadata.py Project: frank-y-liu/gatk
    def __init__(self,
                 sample_name: str,
                 n_j: np.ndarray,
                 contig_list: List[str]):
        assert n_j.ndim == 1
        assert n_j.size == len(contig_list)

        self.sample_name = sample_name
        self.contig_list = contig_list

        # total count per contig
        self.n_j = n_j.astype(types.med_uint)

        # total count
        self.n_total = np.sum(self.n_j)
        self._contig_map = {contig: j for j, contig in enumerate(contig_list)}
Code Example #13
File: keras_regression.py Project: csxeba/NitaGeo
 def wgs_predict(self, questions: np.ndarray, labels: np.ndarray = None, y: np.ndarray = None):
     """Dumps the coordinate predictions into a text file"""
     from csxdata.utilities.nputils import haversine
     preds = self.network.predict(questions)
     dist = haversine(self.dataframe.upscale(preds), y) if y is not None else None
     preds = self.dataframe.upscale(preds).astype(str)
     labels = np.atleast_2d(labels).T
     preds = np.concatenate((labels, preds), axis=1)
     if y is not None:
         preds = np.concatenate((preds, y.astype(str), np.atleast_2d(dist).T.astype(str)), axis=1)
     preds = ["\t".join(pr.tolist()) for pr in preds]
     chain = "\n".join(preds)
     chain = chain.replace(".", ",")  # use decimal commas in the output
     header = ["Azon", "Y", "X"]
     if y is not None:
         header += ["real_Y", "real_X", "Haversine"]
     chain = "\t".join(header) + "\n" + chain
     with open("logs/" + self.name + "_predictions.csv", "w") as f:
         f.write(chain)
Code Example #14
    def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
        """
        Generate adversarial samples and return them in an array.

        :param x: An array with the original inputs to be attacked.
        :param y: An array with the original labels to be predicted.
        :return: An array holding the adversarial examples.
        """
        x = x.astype(ART_NUMPY_DTYPE)
        preds = self.estimator.predict(x, batch_size=self.batch_size)

        if self.estimator.nb_classes == 2 and preds.shape[1] == 1:
            raise ValueError(
                "This attack has not yet been tested for binary classification with a single output classifier."
            )

        if divmod(x.shape[2] - self.freq_dim, self.stride)[1] != 0:
            raise ValueError(
                "Incompatible value combination in image height/width, freq_dim and stride detected. "
                "Adapt these parameters to fulfill the following conditions: "
                "divmod(image_height - freq_dim, stride)[1] == 0 "
                "and "
                "divmod(image_width - freq_dim, stride)[1] == 0"
            )

        if y is None:
            if self.targeted:
                raise ValueError("Target labels `y` need to be provided for a targeted attack.")

            # Use model predictions as correct outputs
            logger.info("Using the model prediction as the correct label for SimBA.")
            y_i = np.argmax(preds, axis=1)
        else:
            y_i = np.argmax(y, axis=1)

        desired_label = y_i[0]
        current_label = np.argmax(preds, axis=1)[0]
        last_prob = preds.reshape(-1)[desired_label]

        if self.estimator.channels_first:
            nb_channels = x.shape[1]
        else:
            nb_channels = x.shape[3]

        n_dims = np.prod(x.shape)

        if self.attack == "px":
            if self.order == "diag":
                indices = self.diagonal_order(x.shape[2], nb_channels)[: self.max_iter]
            elif self.order == "random":
                indices = np.random.permutation(n_dims)[: self.max_iter]
            indices_size = len(indices)
            while indices_size < self.max_iter:
                if self.order == "diag":
                    tmp_indices = self.diagonal_order(x.shape[2], nb_channels)
                elif self.order == "random":
                    tmp_indices = np.random.permutation(n_dims)
                indices = np.hstack((indices, tmp_indices))[: self.max_iter]
                indices_size = len(indices)
        elif self.attack == "dct":
            indices = self._block_order(x.shape[2], nb_channels, initial_size=self.freq_dim, stride=self.stride)[
                : self.max_iter
            ]
            indices_size = len(indices)
            while indices_size < self.max_iter:
                tmp_indices = self._block_order(x.shape[2], nb_channels, initial_size=self.freq_dim, stride=self.stride)
                indices = np.hstack((indices, tmp_indices))[: self.max_iter]
                indices_size = len(indices)

            def trans(var_z):
                return self._block_idct(var_z, block_size=x.shape[2])

        clip_min = -np.inf
        clip_max = np.inf
        if self.estimator.clip_values is not None:
            clip_min, clip_max = self.estimator.clip_values

        term_flag = 1
        if self.targeted:
            if desired_label != current_label:
                term_flag = 0
        else:
            if desired_label == current_label:
                term_flag = 0

        nb_iter = 0
        while term_flag == 0 and nb_iter < self.max_iter:
            diff = np.zeros(n_dims).astype(ART_NUMPY_DTYPE)
            diff[indices[nb_iter]] = self.epsilon

            if self.attack == "dct":
                left_preds = self.estimator.predict(
                    np.clip(x - trans(diff.reshape(x.shape)), clip_min, clip_max), batch_size=self.batch_size
                )
            elif self.attack == "px":
                left_preds = self.estimator.predict(
                    np.clip(x - diff.reshape(x.shape), clip_min, clip_max), batch_size=self.batch_size
                )
            left_prob = left_preds.reshape(-1)[desired_label]

            if self.attack == "dct":
                right_preds = self.estimator.predict(
                    np.clip(x + trans(diff.reshape(x.shape)), clip_min, clip_max), batch_size=self.batch_size
                )
            elif self.attack == "px":
                right_preds = self.estimator.predict(
                    np.clip(x + diff.reshape(x.shape), clip_min, clip_max), batch_size=self.batch_size
                )
            right_prob = right_preds.reshape(-1)[desired_label]

            # Use (2 * int(self.targeted) - 1) to shorten code?
            if self.targeted:
                if left_prob > last_prob:
                    if left_prob > right_prob:
                        if self.attack == "dct":
                            x = np.clip(x - trans(diff.reshape(x.shape)), clip_min, clip_max)
                        elif self.attack == "px":
                            x = np.clip(x - diff.reshape(x.shape), clip_min, clip_max)
                        last_prob = left_prob
                        current_label = np.argmax(left_preds, axis=1)[0]
                    else:
                        if self.attack == "dct":
                            x = np.clip(x + trans(diff.reshape(x.shape)), clip_min, clip_max)
                        elif self.attack == "px":
                            x = np.clip(x + diff.reshape(x.shape), clip_min, clip_max)
                        last_prob = right_prob
                        current_label = np.argmax(right_preds, axis=1)[0]
                else:
                    if right_prob > last_prob:
                        if self.attack == "dct":
                            x = np.clip(x + trans(diff.reshape(x.shape)), clip_min, clip_max)
                        elif self.attack == "px":
                            x = np.clip(x + diff.reshape(x.shape), clip_min, clip_max)
                        last_prob = right_prob
                        current_label = np.argmax(right_preds, axis=1)[0]
            else:
                if left_prob < last_prob:
                    if left_prob < right_prob:
                        if self.attack == "dct":
                            x = np.clip(x - trans(diff.reshape(x.shape)), clip_min, clip_max)
                        elif self.attack == "px":
                            x = np.clip(x - diff.reshape(x.shape), clip_min, clip_max)
                        last_prob = left_prob
                        current_label = np.argmax(left_preds, axis=1)[0]
                    else:
                        if self.attack == "dct":
                            x = np.clip(x + trans(diff.reshape(x.shape)), clip_min, clip_max)
                        elif self.attack == "px":
                            x = np.clip(x + diff.reshape(x.shape), clip_min, clip_max)
                        last_prob = right_prob
                        current_label = np.argmax(right_preds, axis=1)[0]
                else:
                    if right_prob < last_prob:
                        if self.attack == "dct":
                            x = np.clip(x + trans(diff.reshape(x.shape)), clip_min, clip_max)
                        elif self.attack == "px":
                            x = np.clip(x + diff.reshape(x.shape), clip_min, clip_max)
                        last_prob = right_prob
                        current_label = np.argmax(right_preds, axis=1)[0]

            if self.targeted:
                if desired_label == current_label:
                    term_flag = 1
            else:
                if desired_label != current_label:
                    term_flag = 1

            nb_iter = nb_iter + 1

        if nb_iter < self.max_iter:
            logger.info("SimBA (%s) %s attack succeeded", self.attack, ["non-targeted", "targeted"][int(self.targeted)])
        else:
            logger.info("SimBA (%s) %s attack failed", self.attack, ["non-targeted", "targeted"][int(self.targeted)])

        return x
Code Example #15
def _float_to_int16(data: np.ndarray) -> np.ndarray:
    amp = 2**15 - 1  # default amp for .DSB
    max_data = np.abs(data).max()
    min_data = np.abs(data).min()
    data = (data - min_data) / (max_data - min_data) * amp
    return data.astype(np.int16)
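
A minimal usage sketch for _float_to_int16 (toy signal; note that the scaling uses the min and max of the absolute values, so a zero-crossing signal keeps its sign):

import numpy as np

t = np.linspace(0.0, 1.0, 8000)
wave = np.sin(2.0 * np.pi * 440.0 * t)   # float waveform in [-1, 1]
pcm = _float_to_int16(wave)
print(pcm.dtype)                         # int16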
Code Example #16
 def __init__(self, arr: ndarray, prefs: ndarray):
     assert arr.shape == prefs.shape
     self._arr = arr.astype('bool')
     self._prefs = prefs
     self._score = None
Code Example #17
 def _cvt_ktoc_ndarray(self, data: np.ndarray) -> np.ndarray:
     return (data.astype(np.float32) - 27315.0) / 100.0
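
For reference, the formula above converts centi-Kelvin sensor counts to degrees Celsius; a quick check with hypothetical raw values:

import numpy as np

raw = np.array([27315, 29815], dtype=np.uint16)     # centi-Kelvin readings
print((raw.astype(np.float32) - 27315.0) / 100.0)   # [ 0. 25.] -> 0 °C and 25 °C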
Code Example #18
 def __scaleAndAdjustMinimum(self, unscaled: np.ndarray):
     return unscaled.astype('float64') / self.__maxrange + self.__minimum
Code Example #19
 def get_raw_predictions(self, raw_embedding: ndarray) -> ndarray:
     raw_embedding = raw_embedding.astype(numpy.float32)  # For T5 fp16
     embedding = torch.tensor(raw_embedding).to(self._device)
     # Pass embeddings to model to produce predictions
     yhat_conservation = self._conservation_model(embedding)
     return yhat_conservation
Code Example #20
File: utils.py Project: marvel-works/cortex-1
 def write(self, frame: np.ndarray):
     self.process.stdin.write(frame.astype(np.uint8).tobytes())
Code Example #21
def make_chromosome(ndarray: np.ndarray) -> Chromosome:
    chromosome = Chromosome(ndarray.size)
    chromosome.genes = np.copy(ndarray.astype(np.bool_))
    return chromosome
Code Example #22
def plot_confusion_matrix(
    cm: np.ndarray,
    class_names=None,
    normalize=False,
    title="confusion matrix",
    fname=None,
    show=True,
    figsize=12,
    fontsize=32,
    colormap="Blues",
):
    """Render the confusion matrix and return matplotlib's figure with it.
    Normalization can be applied by setting `normalize=True`.

    Args:
        cm: numpy confusion matrix
        class_names: class names
        normalize: boolean flag to normalize confusion matrix
        title: title
        fname: filename to save confusion matrix
        show: boolean flag for preview
        figsize: matplotlib figure size
        fontsize: matplotlib font size
        colormap: matplotlib color map

    Returns:
        matplotlib figure
    """
    plt.ioff()

    cmap = plt.cm.__dict__[colormap]

    if class_names is None:
        class_names = [str(i) for i in range(len(np.diag(cm)))]

    if normalize:
        cm = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]

    plt.rcParams.update({"font.size": int(fontsize / np.log2(len(class_names)))})

    figure = plt.figure(figsize=(figsize, figsize))
    plt.title(title)
    plt.imshow(cm, interpolation="nearest", cmap=cmap)
    plt.colorbar()

    tick_marks = np.arange(len(class_names))
    plt.xticks(tick_marks, class_names, rotation=45, ha="right")

    plt.yticks(tick_marks, class_names)

    fmt = ".2f" if normalize else "d"
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(
            j,
            i,
            format(cm[i, j], fmt),
            horizontalalignment="center",
            color="white" if cm[i, j] > thresh else "black",
        )

    plt.tight_layout()
    plt.ylabel("True label")
    plt.xlabel("Predicted label")

    if fname is not None:
        plt.savefig(fname=fname)

    if show:
        plt.show()

    plt.ion()
    return figure
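
A minimal usage sketch for plot_confusion_matrix (toy 2x2 matrix; assumes numpy, itertools and matplotlib.pyplot are imported in the surrounding module, as the function requires):

import numpy as np

cm = np.array([[5, 1],
               [2, 7]])
fig = plot_confusion_matrix(cm, class_names=["cat", "dog"],
                            normalize=True, show=False)
fig.savefig("confusion_matrix.png")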
Code Example #23
    def generate(self,
                 x: np.ndarray,
                 y: Optional[np.ndarray] = None,
                 **kwargs) -> np.ndarray:
        """
        Generate adversarial samples and return them in an array.

        :param x: An array with the original inputs.
        :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
                  (nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
                  samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
                  (explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
        :param mask: An array with a mask broadcastable to input `x` defining where to apply adversarial perturbations.
                     Shape needs to be broadcastable to the shape of x and can also be of the same shape as `x`. Any
                     features for which the mask is zero will not be adversarially perturbed.
        :type mask: `np.ndarray`
        :return: An array holding the adversarial examples.
        """
        import tensorflow as tf  # lgtm [py/repeated-import]

        mask = self._get_mask(x, **kwargs)

        # Ensure eps is broadcastable
        self._check_compatibility_input_and_eps(x=x)

        # Check whether random eps is enabled
        self._random_eps()

        # Set up targets
        targets = self._set_targets(x, y)

        # Create dataset
        if mask is not None:
            # Here we need to make a distinction: if the masks are different for each input, we need to index
            # those for the current batch. Otherwise (i.e. mask is meant to be broadcasted), keep it as it is.
            if len(mask.shape) == len(x.shape):
                dataset = tf.data.Dataset.from_tensor_slices((
                    x.astype(ART_NUMPY_DTYPE),
                    targets.astype(ART_NUMPY_DTYPE),
                    mask.astype(ART_NUMPY_DTYPE),
                )).batch(self.batch_size, drop_remainder=False)

            else:
                dataset = tf.data.Dataset.from_tensor_slices((
                    x.astype(ART_NUMPY_DTYPE),
                    targets.astype(ART_NUMPY_DTYPE),
                    np.array([mask.astype(ART_NUMPY_DTYPE)] * x.shape[0]),
                )).batch(self.batch_size, drop_remainder=False)

        else:
            dataset = tf.data.Dataset.from_tensor_slices((
                x.astype(ART_NUMPY_DTYPE),
                targets.astype(ART_NUMPY_DTYPE),
            )).batch(self.batch_size, drop_remainder=False)

        # Start to compute adversarial examples
        adv_x = x.astype(ART_NUMPY_DTYPE)
        data_loader = iter(dataset)

        # Compute perturbation with batching
        for (batch_id, batch_all) in enumerate(
                tqdm(data_loader,
                     desc="PGD - Batches",
                     leave=False,
                     disable=not self.verbose)):
            if mask is not None:
                (batch, batch_labels,
                 mask_batch) = batch_all[0], batch_all[1], batch_all[2]
            else:
                (batch, batch_labels,
                 mask_batch) = batch_all[0], batch_all[1], None

            batch_index_1, batch_index_2 = batch_id * self.batch_size, (
                batch_id + 1) * self.batch_size

            # Compute batch_eps and batch_eps_step
            if isinstance(self.eps, np.ndarray):
                if len(self.eps.shape) == len(
                        x.shape) and self.eps.shape[0] == x.shape[0]:
                    batch_eps = self.eps[batch_index_1:batch_index_2]
                    batch_eps_step = self.eps_step[batch_index_1:batch_index_2]

                else:
                    batch_eps = self.eps
                    batch_eps_step = self.eps_step

            else:
                batch_eps = self.eps
                batch_eps_step = self.eps_step

            for rand_init_num in range(max(1, self.num_random_init)):
                if rand_init_num == 0:
                    # first iteration: use the adversarial examples as they are the only ones we have now
                    adv_x[batch_index_1:batch_index_2] = self._generate_batch(
                        x=batch,
                        targets=batch_labels,
                        mask=mask_batch,
                        eps=batch_eps,
                        eps_step=batch_eps_step)
                else:
                    adversarial_batch = self._generate_batch(
                        x=batch,
                        targets=batch_labels,
                        mask=mask_batch,
                        eps=batch_eps,
                        eps_step=batch_eps_step)
                    attack_success = compute_success_array(
                        self.estimator,
                        batch,
                        batch_labels,
                        adversarial_batch,
                        self.targeted,
                        batch_size=self.batch_size,
                    )
                    # return the successful adversarial examples
                    adv_x[batch_index_1:batch_index_2][
                        attack_success] = adversarial_batch[attack_success]

        logger.info(
            "Success rate of attack: %.2f%%",
            100 * compute_success(self.estimator,
                                  x,
                                  targets,
                                  adv_x,
                                  self.targeted,
                                  batch_size=self.batch_size),
        )

        return adv_x
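
For context, the ART attack generate methods shown in examples #14, #23, #30 and #33 are all driven the same way; a hedged sketch of typical usage (model, loss_object and x_test are assumed to exist; class names follow the ART documentation):

from art.attacks.evasion import ProjectedGradientDescent
from art.estimators.classification import TensorFlowV2Classifier

classifier = TensorFlowV2Classifier(model=model, nb_classes=10,
                                    input_shape=(28, 28, 1),
                                    loss_object=loss_object)
attack = ProjectedGradientDescent(classifier, eps=0.1, eps_step=0.01, max_iter=40)
x_adv = attack.generate(x=x_test)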
Code Example #24
File: note.py Project: oguzkirman/muspy
def from_note_representation(
    array: ndarray,
    resolution: int = DEFAULT_RESOLUTION,
    program: int = 0,
    is_drum: bool = False,
    use_start_end: bool = False,
    encode_velocity: bool = True,
    default_velocity: int = 64,
) -> Music:
    """Decode note-based representation into a Music object.

    Parameters
    ----------
    array : ndarray
        Array in note-based representation to decode. Will be cast to
        integer if not of integer type.
    resolution : int
        Time steps per quarter note. Defaults to `muspy.DEFAULT_RESOLUTION`.
    program : int, optional
        Program number according to General MIDI specification [1].
        Acceptable values are 0 to 127. Defaults to 0 (Acoustic Grand
        Piano).
    is_drum : bool, optional
        A boolean indicating if it is a percussion track. Defaults to
        False.
    use_start_end : bool
        Whether to use 'start' and 'end' to encode the timing rather than
        'time' and 'duration'. Defaults to False.
    encode_velocity : bool
        Whether to encode note velocities. Defaults to True.
    default_velocity : int
        Default velocity value to use when decoding if `encode_velocity` is
        False. Defaults to 64.

    Returns
    -------
    :class:`muspy.Music` object
        Decoded Music object.

    References
    ----------
    [1] https://www.midi.org/specifications/item/gm-level-1-sound-set

    """
    if not np.issubdtype(array.dtype, np.integer):
        array = array.astype(int)

    notes = []
    velocity = default_velocity
    for note_tuple in array:
        if encode_velocity:
            velocity = note_tuple[3]
        note = Note(
            time=note_tuple[1],
            duration=(note_tuple[2] - note_tuple[1]) if use_start_end else note_tuple[2],
            pitch=note_tuple[0],
            velocity=velocity,
        )
        notes.append(note)

    # Sort the notes
    notes.sort(key=attrgetter("time", "pitch", "duration", "velocity"))

    # Create the Track and Music objects
    track = Track(program=program, is_drum=is_drum, notes=notes)
    music = Music(resolution=resolution, tracks=[track])

    return music
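
A minimal usage sketch for from_note_representation (hypothetical note array; with use_start_end=False the columns are pitch, time, duration, velocity):

import numpy as np

notes = np.array([[60, 0, 24, 64],    # C4 at time 0, one quarter note at resolution 24
                  [64, 24, 24, 80]])  # E4 one quarter note later
music = from_note_representation(notes, resolution=24)
print(len(music.tracks[0].notes))     # 2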
Code Example #25
    def do_pruning(self, in_channel_mask: np.ndarray, pruner: Pruner):
        """
        Prune the block in place
        :param in_channel_mask: a 0-1 vector indicates whether the corresponding channel should be pruned (0) or not (1)
        :param pruner: the method to determinate the pruning threshold.
        the pruner accepts a torch.Tensor as input and return a threshold
        """
        # prune the pixel-wise conv layer
        if self.pw:
            pw_layer = self.conv[0]
            in_channel_mask, input_gate_mask = prune_conv_layer(conv_layer=pw_layer[0],
                                                                bn_layer=pw_layer[1],
                                                                sparse_layer_in=self.input_gate if self.has_input_mask else None,
                                                                sparse_layer_out=pw_layer.sparse_layer,
                                                                in_channel_mask=None if self.has_input_mask else in_channel_mask,
                                                                pruner=pruner,
                                                                prune_output_mode="prune",
                                                                prune_mode='default')
            if not np.any(in_channel_mask) or not np.any(input_gate_mask):
                # no channel left
                self._prune_whole_layer()
                return self.output_channel

            channel_select_idx = np.squeeze(np.argwhere(np.asarray(input_gate_mask)))
            if len(channel_select_idx.shape) == 0:
                # expand the single scalar to array
                channel_select_idx = np.expand_dims(channel_select_idx, 0)
            elif len(channel_select_idx.shape) == 1 and channel_select_idx.shape[0] == 0:
                # nothing left
                raise NotImplementedError("No layer left in input channel")
            self.select.idx = channel_select_idx
            if self.has_input_mask:
                self.input_gate.do_pruning(input_gate_mask)

        # update the hidden dim
        self.hidden_dim = int(in_channel_mask.astype(int).sum())

        # prune the output of the dw layer
        # this in_channel_mask is supposed unchanged
        dw_layer = self.conv[-5]
        in_channel_mask, _ = prune_conv_layer(conv_layer=dw_layer[0],
                                              bn_layer=dw_layer[1],
                                              sparse_layer_in=None,
                                              sparse_layer_out=dw_layer.sparse_layer,
                                              in_channel_mask=in_channel_mask,
                                              pruner=pruner,
                                              prune_output_mode="same",
                                              prune_mode='default')

        # prune input of the dw-linear layer (the last layer)
        out_channel_mask, _ = prune_conv_layer(conv_layer=self.conv[-4],
                                               bn_layer=self.conv[-3],
                                               sparse_layer_in=None,
                                               sparse_layer_out=self.conv[-2] if isinstance(self.conv[-2],
                                                                                            SparseGate) else self.conv[
                                                   -3],
                                               in_channel_mask=in_channel_mask,
                                               pruner=pruner,
                                               prune_output_mode="prune",
                                               prune_mode='default')

        # update output_channel
        self.output_channel = int(out_channel_mask.astype(int).sum())

        # if self.use_res_connect:
        # do padding allowing adding with residual connection
        # the output dim is unchanged
        expander: ChannelExpand = self.conv[-1]
        # note that the idx of the expander might be set in a pruned model
        original_expander_idx = expander.idx
        assert len(original_expander_idx) == len(out_channel_mask), "the output channel should be consistent"
        pruned_expander_idx = original_expander_idx[out_channel_mask]
        idx = np.squeeze(pruned_expander_idx)
        expander.idx = idx

        # return the output dim
        # the output dim is kept unchanged
        return expander.channel_num
Code Example #26
    def _linear_disc_mat_eig(
            cls,
            N: np.ndarray,
            y: np.ndarray,
            ddof: int = 1,
            classes: t.Optional[np.ndarray] = None,
            class_freqs: t.Optional[np.ndarray] = None,
    ) -> t.Tuple[np.ndarray, np.ndarray]:
        """Compute eigenvalues/vecs of the Linear Discriminant Analysis Matrix.

        More specifically, the eigenvalues and eigenvectors are calculated from
        matrix S = (Scatter_Within_Mat)^(-1) * (Scatter_Between_Mat).

        Check ``ft_can_cor`` documentation for more in-depth information about
        this matrix.

        Parameters
        ----------
        ddof : :obj:`int`, optional
            Degrees of freedom of covariance matrix calculated during LDA.

        classes : :obj:`np.ndarray`, optional
            Distinct classes of ``y``.

        class_freqs : :obj:`np.ndarray`, optional
            Absolute class frequencies of ``y``.

        Returns
        -------
        :obj:`tuple`(:obj:`np.ndarray`, :obj:`np.ndarray`)

            Eigenvalues and eigenvectors (in this order) of Linear
            Discriminant Analysis Matrix.
        """

        def compute_scatter_within(
                N: np.ndarray,
                y: np.ndarray,
                class_val_freq: t.Tuple[np.ndarray, np.ndarray],
                ddof: int = 1) -> np.ndarray:
            """Compute Scatter Within matrix. Check doc above for more info."""
            scatter_within = np.array(
                [(cl_frq - 1.0) * np.cov(
                    N[y == cl_val, :], rowvar=False, ddof=ddof)
                 for cl_val, cl_frq in zip(*class_val_freq)]).sum(axis=0)

            return scatter_within

        def compute_scatter_between(
                N: np.ndarray, y: np.ndarray,
                class_val_freq: t.Tuple[np.ndarray, np.ndarray]) -> np.ndarray:
            """Compute Scatter Between matrix. The doc above has more info."""
            class_vals, class_freqs = class_val_freq

            class_means = np.array(
                [N[y == cl_val, :].mean(axis=0) for cl_val in class_vals])

            relative_centers = class_means - N.mean(axis=0)

            scatter_between = np.array([
                cl_frq * np.outer(rc, rc)
                for cl_frq, rc in zip(class_freqs, relative_centers)
            ]).sum(axis=0)

            return scatter_between

        if classes is None or class_freqs is None:
            class_val_freq = np.unique(y, return_counts=True)

        else:
            class_val_freq = (classes, class_freqs)

        N = N.astype(float)

        scatter_within = compute_scatter_within(
            N, y, class_val_freq, ddof=ddof)
        scatter_between = compute_scatter_between(N, y, class_val_freq)

        try:
            scatter_within_inv = np.linalg.inv(scatter_within)

            return np.linalg.eig(
                np.matmul(scatter_within_inv, scatter_between))

        except (np.linalg.LinAlgError, ValueError):
            return np.array([np.nan]), np.array([np.nan])
Code Example #27
def score(D:np.ndarray, target:np.ndarray, k=5, 
          metric:str='distance', test_set_ind:np.ndarray=None, verbose:int=0):
    """Perform `k`-nearest neighbor classification.
    
    Use the ``n x n`` symmetric distance matrix `D` and target class 
    labels `target` to perform a `k`-NN experiment (leave-one-out 
    cross-validation or evaluation of test set; see parameter `test_set_ind`).
    Ties are broken by the nearest neighbor.
    
    Parameters
    ----------
    D : ndarray
        The ``n x n`` symmetric distance (similarity) matrix.
    
    target : ndarray (of dtype=int)
        The ``n x 1`` target class labels (ground truth).
    
    k : int or array_like (of dtype=int), optional (default: 5)
        Neighborhood size for `k`-NN classification.
        For each value in `k`, one `k`-NN experiment is performed.
        
        HINT: Providing more than one value for `k` is a cheap means to perform 
        multiple `k`-NN experiments at once. Try e.g. ``k=[1, 5, 20]``.
    
    metric : {'distance', 'similarity'}, optional (default: 'distance')
        Define, whether matrix `D` is a distance or similarity matrix
    
    test_set_ind : ndarray, optional (default: None)
        Define data points to be held out as part of a test set. Can be:
        
        - None : Perform a LOO-CV experiment
        - ndarray : Hold out points indexed in this array as test set. Fit 
          model to remaining data. Evaluate model on test set.
    
    verbose : int, optional (default: 0)
        Increasing level of output (progress report).
    
    Returns
    -------
    acc : ndarray (shape=(n_k x 1), dtype=float)
        Classification accuracy (`n_k`... number of items in parameter `k`)
        
        HINT: Referring to the above example...
        ... ``acc[0]`` gives the accuracy of the ``k=1`` experiment.
    corr : ndarray (shape=(n_k x n), dtype=int)
        Raw vectors of correctly classified items
        
        HINT: ... ``corr[1, :]`` gives these items for the ``k=5`` experiment.
    cmat : ndarray (shape=(n_k x n_t x n_t), dtype=int) 
        Confusion matrix (``n_t`` number of unique items in parameter target)
        
        HINT: ... ``cmat[2, :, :]`` gives the confusion matrix of 
        the ``k=20`` experiment.
    """
    
    # Check input sanity
    log = Logging.ConsoleLogging()
    IO._check_distance_matrix_shape(D)
    IO._check_distance_matrix_shape_fits_labels(D, target)
    IO._check_valid_metric_parameter(metric)
    if metric == 'distance':
        d_self = np.inf
        sort_order = 1
    elif metric == 'similarity':
        d_self = -np.inf
        sort_order = -1
    
    # Copy, because data is changed
    D = D.copy()
    target = target.astype(int)
    
    if verbose:
        log.message("Start k-NN experiment.")
    # Handle LOO-CV vs. test set mode
    if test_set_ind is None:
        n = D.shape[0]
        test_set_ind = range(n)    # dummy 
        train_set_ind = n   # dummy
    else:  
        # number of points to be classified
        n = test_set_ind.size
        # Indices of training examples
        train_set_ind = np.setdiff1d(np.arange(n), test_set_ind)
    # Number of k-NN parameters
    try:
        k_length = k.size
    except AttributeError as e:
        if isinstance(k, int):
            k = np.array([k])
            k_length = k.size
        elif isinstance(k, list):
            k = np.array(k)
            k_length = k.size
        else:
            raise e
        
    acc = np.zeros((k_length, 1))
    corr = np.zeros((k_length, D.shape[0]))
        
    cl = np.sort(np.unique(target))
    cmat = np.zeros((k_length, len(cl), len(cl)))
    
    classes = target.copy()
    for idx, cur_class in enumerate(cl):
        # change labels to 0, 1, ..., len(cl)-1
        classes[target == cur_class] = idx
    
    cl = range(len(cl))
    
    # Classify each point in test set
    for i in test_set_ind:
        seed_class = classes[i]
        
        if issparse(D):
            row = D.getrow(i).toarray().ravel()
        else:
            row = D[i, :]
        row[i] = d_self
        
        # Sort points in training set according to distance
        # Randomize, in case there are several points of same distance
        # (this is especially relevant for SNN rescaling)
        rp = train_set_ind
        rp = np.random.permutation(rp)
        d2 = row[rp]
        d2idx = np.argsort(d2, axis=0)[::sort_order]
        idx = rp[d2idx]      
        
        # More than one k is useful for cheap multiple k-NN experiments at once
        for j in range(k_length):
            nn_class = classes[idx[0:k[j]]]
            cs = np.bincount(nn_class.astype(int))
            max_cs = np.where(cs == np.max(cs))[0]
            
            # "tie": use nearest neighbor
            if len(max_cs) > 1:
                if seed_class == nn_class[0]:
                    acc[j] += 1/n 
                    corr[j, i] = 1
                cmat[j, seed_class, nn_class[0]] += 1       
            # majority vote
            else:
                if cl[max_cs[0]] == seed_class:
                    acc[j] += 1/n
                    corr[j, i] = 1
                cmat[j, seed_class, cl[max_cs[0]]] += 1
                       
    if verbose:
        log.message("Finished k-NN experiment.")
        
    return acc, corr, cmat
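
A minimal usage sketch for score (toy data; building the distance matrix with scipy is an assumption of this sketch, and the module-level helpers Logging and IO must be available as in the surrounding module):

import numpy as np
from scipy.spatial.distance import pdist, squareform

X = np.random.rand(20, 3)           # 20 points in 3-D
target = np.repeat([0, 1], 10)      # two classes
D = squareform(pdist(X))            # symmetric n x n distance matrix

acc, corr, cmat = score(D, target, k=[1, 5], metric='distance')
print(acc.ravel())                  # LOO-CV accuracy for k=1 and k=5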
Code Example #28
def validate_by_template_matching(img: np.ndarray):
    """ Detect 3D black boxes by template matching.
    1. Binarize the image: voxels inside a black box become False, voxels outside become True.
    2. The template is 7x7x2, with one section True and the other False.
    3. Slide the template through the array and detect the matching regions.
    4. Rotate the template to 7x2x7 and 2x7x7 and run the same detection.
    5. If we find multiple matches in all of the x, y, z directions, there is probably a black box.
    Note that this is not always effective: if the black box is large enough to reach both sides,
    the detection will fail.

    Parameters
    -----------
    img:
        3D image volume.
    """
    logging.info("validation by template matching...")

    if np.issubdtype(img.dtype, np.floating):
        logging.warning(
            'do not support image with floating data type, will skip the validation.'
        )
        return True

    img = img.astype(dtype=bool)

    score_threshold = 0.9
    num_threshold = 100
    evidence_point = 0

    # slide both faces of the template along each of the three axes
    for shape, face in (
        ((7, 7, 2), np.s_[:, :, 0]), ((7, 7, 2), np.s_[:, :, 1]),
        ((2, 7, 7), np.s_[0, :, :]), ((2, 7, 7), np.s_[1, :, :]),
        ((7, 2, 7), np.s_[:, 0, :]), ((7, 2, 7), np.s_[:, 1, :]),
    ):
        temp = np.zeros(shape, dtype=bool)
        temp[face] = True
        result = match_template(img, temp)
        if np.count_nonzero(result > score_threshold) > num_threshold:
            evidence_point += 1

    if evidence_point > 4:
        return False
    else:
        return True
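
A hedged usage sketch for validate_by_template_matching (synthetic volume; match_template is assumed to come from skimage.feature, as the function requires):

import numpy as np

vol = np.full((64, 64, 64), 255, dtype=np.uint8)
vol[20:40, 20:40, 20:40] = 0                 # embed a black box
print(validate_by_template_matching(vol))    # expected False: the box faces match in every direction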
Code Example #29
File: solids.py Project: sylvaus/quaternion-sim
 def set_position(self, pos: ndarray) -> None:
     self.pose.position = pos.astype(dtype=float).reshape(3, 1)
Code Example #30
    def generate(self,
                 x: np.ndarray,
                 y: Optional[np.ndarray] = None,
                 **kwargs) -> np.ndarray:
        """
        Generate adversarial samples and return them in a Numpy array.

        :param x: An array with the original inputs to be attacked.
        :param y: An array with the original labels to be predicted.
        :return: An array holding the adversarial examples.
        """
        x_adv = x.astype(ART_NUMPY_DTYPE)

        # Initialize variables
        y_pred = self.estimator.predict(x, batch_size=self.batch_size)
        pred_class = np.argmax(y_pred, axis=1)

        if self.estimator.nb_classes == 2 and y_pred.shape[1] == 1:
            raise ValueError(
                "This attack has not yet been tested for binary classification with a single output classifier."
            )

        # Compute perturbation with implicit batching
        for batch_id in trange(int(
                np.ceil(x_adv.shape[0] / float(self.batch_size))),
                               desc="NewtonFool",
                               disable=not self.verbose):
            batch_index_1, batch_index_2 = batch_id * self.batch_size, (
                batch_id + 1) * self.batch_size
            batch = x_adv[batch_index_1:batch_index_2]

            # Main algorithm for each batch
            norm_batch = np.linalg.norm(np.reshape(batch,
                                                   (batch.shape[0], -1)),
                                        axis=1)
            l_batch = pred_class[batch_index_1:batch_index_2]
            l_b = to_categorical(l_batch,
                                 self.estimator.nb_classes).astype(bool)

            # Main loop of the algorithm
            for _ in range(self.max_iter):
                # Compute score
                score = self.estimator.predict(batch)[l_b]

                # Compute the gradients and norm
                grads = self.estimator.class_gradient(batch, label=l_batch)
                if grads.shape[1] == 1:
                    grads = np.squeeze(grads, axis=1)
                norm_grad = np.linalg.norm(np.reshape(grads,
                                                      (batch.shape[0], -1)),
                                           axis=1)

                # Theta
                theta = self._compute_theta(norm_batch, score, norm_grad)

                # Perturbation
                di_batch = self._compute_pert(theta, grads, norm_grad)

                # Update xi and perturbation
                batch += di_batch

            # Apply clip
            if self.estimator.clip_values is not None:
                clip_min, clip_max = self.estimator.clip_values
                x_adv[batch_index_1:batch_index_2] = np.clip(
                    batch, clip_min, clip_max)
            else:
                x_adv[batch_index_1:batch_index_2] = batch

        logger.info(
            "Success rate of NewtonFool attack: %.2f%%",
            100 * compute_success(
                self.estimator, x, y, x_adv, batch_size=self.batch_size),
        )
        return x_adv
Code Example #31
File: activation_ops.py Project: syyxsxx/openvino
 def leaky_relu(values: np.ndarray, negative_slope: float):
     values = values.astype(float)
     for index, x in np.ndenumerate(values):
         if x < 0:
             values[index] = negative_slope * x
     return values
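
A quick check of leaky_relu (toy values; negative entries are scaled by the slope, the rest pass through):

import numpy as np

print(leaky_relu(np.array([-2.0, -0.5, 0.0, 3.0]), negative_slope=0.1))
# [-0.2  -0.05  0.    3.  ]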
Code Example #32
File: identifier.py Project: MaybeS/MOT
 def transform(image: np.ndarray) -> np.ndarray:
     image = image.astype(np.float32)
     # subtract the per-channel (BGR) mean
     image -= np.array([104, 117, 123], dtype=np.float32).reshape(1, 1, -1)
     # HWC -> CHW
     image = image.transpose((2, 0, 1))
     return image
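
A quick shape check for transform (dummy frame, calling the function standalone for illustration; assumes an OpenCV-style H x W x C BGR input):

import numpy as np

frame = np.zeros((300, 400, 3), dtype=np.uint8)
out = transform(frame)
print(out.shape)    # (3, 300, 400): mean-subtracted, channels first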
Code Example #33
    def generate(self,
                 x: np.ndarray,
                 y: Optional[np.ndarray] = None,
                 **kwargs) -> np.ndarray:
        """
        Generate adversarial samples and return them in an array.

        :param x: An array with the original inputs.
        :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape
                  (nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial
                  samples. Otherwise, model predictions are used as labels to avoid the "label leaking" effect
                  (explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.
        :return: An array holding the adversarial examples.
        """
        y = check_and_transform_label_format(y, self.estimator.nb_classes)

        if y is None:
            if self.targeted:
                raise ValueError(
                    "Target labels `y` need to be provided for a targeted attack."
                )
            y = get_labels_np_array(
                self.estimator.predict(x, batch_size=self.batch_size)).astype(
                    np.int32)

        x_adv = x.astype(ART_NUMPY_DTYPE)

        for _ in trange(max(1, self.nb_random_init),
                        desc="AutoPGD - restart",
                        disable=not self.verbose):
            # Determine correctly predicted samples
            y_pred = self.estimator.predict(x_adv)
            if self.targeted:
                sample_is_robust = np.argmax(y_pred, axis=1) != np.argmax(
                    y, axis=1)
            elif not self.targeted:
                sample_is_robust = np.argmax(y_pred,
                                             axis=1) == np.argmax(y, axis=1)

            if np.sum(sample_is_robust) == 0:
                break

            x_robust = x_adv[sample_is_robust]
            y_robust = y[sample_is_robust]
            x_init = x[sample_is_robust]

            n = x_robust.shape[0]
            m = np.prod(x_robust.shape[1:]).item()
            random_perturbation = (random_sphere(
                n, m, self.eps,
                self.norm).reshape(x_robust.shape).astype(ART_NUMPY_DTYPE))

            x_robust = x_robust + random_perturbation

            if self.estimator.clip_values is not None:
                clip_min, clip_max = self.estimator.clip_values
                x_robust = np.clip(x_robust, clip_min, clip_max)

            perturbation = projection(x_robust - x_init, self.eps, self.norm)
            x_robust = x_init + perturbation

            # Compute perturbation with implicit batching
            for batch_id in trange(
                    int(np.ceil(x_robust.shape[0] / float(self.batch_size))),
                    desc="AutoPGD - batch",
                    leave=False,
                    disable=not self.verbose,
            ):
                self.eta = 2 * self.eps_step
                batch_index_1, batch_index_2 = batch_id * self.batch_size, (
                    batch_id + 1) * self.batch_size
                x_k = x_robust[batch_index_1:batch_index_2].astype(
                    ART_NUMPY_DTYPE)
                x_init_batch = x_init[batch_index_1:batch_index_2].astype(
                    ART_NUMPY_DTYPE)
                y_batch = y_robust[batch_index_1:batch_index_2]

                p_0 = 0
                p_1 = 0.22
                W = [p_0, p_1]

                while True:
                    p_j_p_1 = W[-1] + max(W[-1] - W[-2] - 0.03, 0.06)
                    if p_j_p_1 > 1:
                        break
                    W.append(p_j_p_1)

                W = [math.ceil(p * self.max_iter) for p in W]

                eta = self.eps_step
                self.count_condition_1 = 0

                for k_iter in trange(self.max_iter,
                                     desc="AutoPGD - iteration",
                                     leave=False,
                                     disable=not self.verbose):

                    # Get perturbation, use small scalar to avoid division by 0
                    tol = 10e-8

                    # Get gradient wrt loss; invert it if attack is targeted
                    grad = self.estimator.loss_gradient(
                        x_k, y_batch) * (1 - 2 * int(self.targeted))

                    # Apply norm bound
                    if self.norm in [np.inf, "inf"]:
                        grad = np.sign(grad)
                    elif self.norm == 1:
                        ind = tuple(range(1, len(x_k.shape)))
                        grad = grad / (np.sum(
                            np.abs(grad), axis=ind, keepdims=True) + tol)
                    elif self.norm == 2:
                        ind = tuple(range(1, len(x_k.shape)))
                        grad = grad / (np.sqrt(
                            np.sum(np.square(grad), axis=ind, keepdims=True)) +
                                       tol)
                    assert x_k.shape == grad.shape

                    perturbation = grad

                    # Apply perturbation and clip
                    z_k_p_1 = x_k + eta * perturbation

                    if self.estimator.clip_values is not None:
                        clip_min, clip_max = self.estimator.clip_values
                        z_k_p_1 = np.clip(z_k_p_1, clip_min, clip_max)

                    if k_iter == 0:
                        x_1 = z_k_p_1
                        perturbation = projection(x_1 - x_init_batch, self.eps,
                                                  self.norm)
                        x_1 = x_init_batch + perturbation

                        f_0 = self.estimator.loss(x=x_k,
                                                  y=y_batch,
                                                  reduction="mean")
                        f_1 = self.estimator.loss(x=x_1,
                                                  y=y_batch,
                                                  reduction="mean")

                        self.eta_w_j_m_1 = eta
                        self.f_max_w_j_m_1 = f_0

                        if f_1 >= f_0:
                            self.f_max = f_1
                            self.x_max = x_1
                            self.x_max_m_1 = x_init_batch
                            self.count_condition_1 += 1
                        else:
                            self.f_max = f_0
                            self.x_max = x_k.copy()
                            self.x_max_m_1 = x_init_batch

                        # Settings for next iteration k
                        x_k_m_1 = x_k.copy()
                        x_k = x_1

                    else:
                        perturbation = projection(z_k_p_1 - x_init_batch,
                                                  self.eps, self.norm)
                        z_k_p_1 = x_init_batch + perturbation

                        alpha = 0.75

                        x_k_p_1 = x_k + alpha * (z_k_p_1 - x_k) + (
                            1 - alpha) * (x_k - x_k_m_1)

                        if self.estimator.clip_values is not None:
                            clip_min, clip_max = self.estimator.clip_values
                            x_k_p_1 = np.clip(x_k_p_1, clip_min, clip_max)

                        perturbation = projection(x_k_p_1 - x_init_batch,
                                                  self.eps, self.norm)
                        x_k_p_1 = x_init_batch + perturbation

                        f_k_p_1 = self.estimator.loss(x=x_k_p_1,
                                                      y=y_batch,
                                                      reduction="mean")

                        if f_k_p_1 > self.f_max:
                            self.count_condition_1 += 1
                            self.x_max = x_k_p_1
                            self.x_max_m_1 = x_k
                            self.f_max = f_k_p_1

                        if k_iter in W:

                            rho = 0.75

                            condition_1 = self.count_condition_1 < rho * (
                                k_iter - W[W.index(k_iter) - 1])
                            condition_2 = self.eta_w_j_m_1 == eta and self.f_max_w_j_m_1 == self.f_max

                            if condition_1 or condition_2:
                                eta = eta / 2
                                x_k_m_1 = self.x_max_m_1
                                x_k = self.x_max
                            else:
                                x_k_m_1 = x_k
                                x_k = x_k_p_1.copy()

                            self.count_condition_1 = 0
                            self.eta_w_j_m_1 = eta
                            self.f_max_w_j_m_1 = self.f_max

                        else:
                            x_k_m_1 = x_k
                            x_k = x_k_p_1.copy()

                y_pred_adv_k = self.estimator.predict(x_k)
                if self.targeted:
                    # Targeted: the attack succeeded where the prediction matches the target label
                    sample_is_not_robust_k = np.argmax(
                        y_pred_adv_k, axis=1) == np.argmax(y_batch, axis=1)
                else:
                    # Untargeted: the attack succeeded where the prediction left the true label
                    sample_is_not_robust_k = np.argmax(
                        y_pred_adv_k, axis=1) != np.argmax(y_batch, axis=1)

                x_robust[batch_index_1:batch_index_2][
                    sample_is_not_robust_k] = x_k[sample_is_not_robust_k]

            x_adv[sample_is_robust] = x_robust

        return x_adv
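This generate loop matches ART's AutoProjectedGradientDescent attack. A minimal usage sketch under that assumption; `classifier`, `x_test`, and `y_test` are hypothetical placeholders for a fitted ART estimator and its data:

import numpy as np
from art.attacks.evasion import AutoProjectedGradientDescent

attack = AutoProjectedGradientDescent(
    estimator=classifier,
    norm=np.inf,     # also accepts 1 or 2, matching the branches above
    eps=0.3,         # overall perturbation budget
    eps_step=0.1,    # initial step size; halved at the checkpoints in W
    max_iter=100,
    targeted=False,
    batch_size=32,
)
x_adv = attack.generate(x=x_test, y=y_test)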
Code example #34
File: evaluate.py  Project: fmcc/mss_layout_analysis
def normalise_confusion_matrix(cm: np.ndarray):
    return cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
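Each row is divided by that class's true-sample count, so a class with no samples in the evaluation set produces a NaN row. A minimal check:

import numpy as np

cm = np.array([[5, 1],
               [2, 2]])
print(normalise_confusion_matrix(cm))
# [[0.8333... 0.1666...]
#  [0.5       0.5      ]]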
Code example #35
    def __call__(
        self,
        x: np.ndarray,
        y: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Apply JPEG compression to sample `x`.

        :param x: Sample to compress with shape of `NCHW`, `NHWC`, `NCFHW` or `NFHWC`. `x` values are expected to be in
                  the data range [0, 1] or [0, 255].
        :param y: Labels of the sample `x`. This function does not affect them in any way.
        :return: Compressed sample and the unchanged labels `y`.
        """
        x_ndim = x.ndim
        if x_ndim not in [4, 5]:
            raise ValueError(
                "Unrecognized input dimension. JPEG compression can only be applied to image and video data."
            )

        if x.min() < 0.0:
            raise ValueError(
                "Negative values in input `x` detected. The JPEG compression defence requires unnormalized input."
            )

        # Swap channel index
        if self.channels_first and x_ndim == 4:
            # image shape NCHW to NHWC
            x = np.transpose(x, (0, 2, 3, 1))
        elif self.channels_first and x_ndim == 5:
            # video shape NCFHW to NFHWC
            x = np.transpose(x, (0, 2, 3, 4, 1))

        # insert temporal dimension to image data
        if x_ndim == 4:
            x = np.expand_dims(x, axis=1)

        # Convert into uint8
        if self.clip_values[1] == 1.0:
            x = x * 255
        x = x.astype("uint8")

        # Set image mode
        if x.shape[-1] == 1:
            image_mode = "L"
        elif x.shape[-1] == 3:
            image_mode = "RGB"
        else:
            raise NotImplementedError(
                "Currently only `RGB` and `L` images are supported.")

        # Prepare grayscale images for "L" mode
        if image_mode == "L":
            x = np.squeeze(x, axis=-1)

        # Compress one image at a time
        x_jpeg = x.copy()
        for idx in tqdm(np.ndindex(x.shape[:2]), desc="JPEG compression"):
            x_jpeg[idx] = self._compress(x[idx], image_mode)

        # Undo preparation grayscale images for "L" mode
        if image_mode == "L":
            x_jpeg = np.expand_dims(x_jpeg, axis=-1)

        # Convert to ART dtype
        if self.clip_values[1] == 1.0:
            x_jpeg = x_jpeg / 255.0
        x_jpeg = x_jpeg.astype(ART_NUMPY_DTYPE)

        # remove temporal dimension for image data
        if x_ndim == 4:
            x_jpeg = np.squeeze(x_jpeg, axis=1)

        # Swap channel index
        if self.channels_first and x_jpeg.ndim == 4:
            # image shape NHWC to NCHW
            x_jpeg = np.transpose(x_jpeg, (0, 3, 1, 2))
        elif self.channels_first and x_ndim == 5:
            # video shape NFHWC to NCFHW
            x_jpeg = np.transpose(x_jpeg, (0, 4, 1, 2, 3))
        return x_jpeg, y
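This __call__ appears to be ART's JpegCompression preprocessor; a hedged usage sketch under that assumption (constructor parameters as in ART's public API):

import numpy as np
from art.defences.preprocessor import JpegCompression

defence = JpegCompression(clip_values=(0.0, 1.0), quality=50, channels_first=False)
x = np.random.rand(8, 32, 32, 3).astype(np.float32)  # NHWC data in [0, 1]
x_compressed, _ = defence(x)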
Code example #36
File: pose.py  Project: sylvaus/quaternion-sim
 def translate(self, translation: ndarray) -> None:
     self.position += translation.astype(float).reshape(3, 1)
Code example #37
def to_unique_bytes(a: np.ndarray, input_range: Tuple[float, float]) -> Any:
    """Returns an array of unique ubytes after applying LinearTransform on the input array."""
    ubyte_range = (np.iinfo(np.ubyte).min, np.iinfo(np.ubyte).max)
    a = LinearTransform.transform(data=a, input_range=input_range, output_range=ubyte_range)
    return np.unique(a.astype(np.ubyte))
Code example #38
 def thresholded_relu(values: np.ndarray, alpha: float):
     values = values.astype(float)
     for index, x in np.ndenumerate(values):
         values[index] = values[index] * (x > alpha)
     return values
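The per-element loop can be collapsed into one vectorised expression; an equivalent sketch (the function name is mine):

import numpy as np

def thresholded_relu_vec(values: np.ndarray, alpha: float) -> np.ndarray:
    # Keep entries strictly above alpha, zero out the rest
    values = values.astype(float)
    return values * (values > alpha)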
Code example #39
def bfloat16_to_float32(data: np.ndarray,
                        dims: Union[int, Sequence[int]]) -> np.ndarray:
    """Converts an ndarray of bfloat16 values (stored as uint32) to float32."""
    # bfloat16 is the high half of a float32, so shifting left by 16 bits and
    # reinterpreting the bits recovers the float32 value.
    shift = lambda x: x << 16  # noqa: E731
    return shift(data.astype(np.int32)).reshape(dims).view(np.float32)
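A quick bit-level check: 0x3F80 and 0x4000 are the bfloat16 patterns for 1.0 and 2.0, and shifting them into the high half of a float32 recovers those values:

import numpy as np

raw = np.array([0x3F80, 0x4000], dtype=np.uint32)
print(bfloat16_to_float32(raw, 2))  # [1. 2.]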
Code example #40
File: array.py  Project: xiaoyanermiemie/MONAI
 def __call__(self, img: np.ndarray, dtype: Optional[np.dtype] = None):
     """
     Apply the transform to `img`, assuming `img` is a numpy array.
     """
     assert isinstance(img, np.ndarray), "image must be numpy array."
     return img.astype(self.dtype if dtype is None else dtype)
Code example #41
File: plotting.py  Project: YaoMeng94/cbptools
def plot_volumetric_roi(data: np.ndarray,
                        out_file: str,
                        view: str = 'orig',
                        skew: tuple = None,
                        facecolor: str = 'bright',
                        edgecolor: str = 'dark',
                        lrflip: bool = False) -> None:
    """ Plot a volumetric ROI, color coding the cluster labels.

    Parameters
    ----------
    data : np.ndarray
        3D data array of a NIfTI image
    out_file : str
        Output filename for the .svg figure
    view : str, optional
        The viewing angle of the ROI. Allowed values are {'orig',
        'left', 'right', 'superior', 'inferior', 'anterior',
        'posterior'}
    skew : tuple, optional
        The elevation and azimuth to rotate the ROI. If set to
        (0, 0) it is difficult to see the 3D effect. If left empty,
        default skewing elevation and azimuth values are used.
    facecolor : str, optional
        The colors the voxels will take, ordered by cluster-id. The
        color palette from sns.color_palette() is used. Allowed values
        are are {'deep', 'muted', 'pastel', 'bright', 'dark',
        'colorblind'}
    edgecolor : str, optional
        The colors the borders around the voxels will take, ordered
        by cluster-id. Same as facecolor.
    lrflip : bool, optional
        Left-right flip of the ROI for viewing purposes

    """

    # Modify data matrix for optimal viewing
    data = data[find_objects(
        data.astype(bool).astype(int))[0]]  # remove whitespace

    if lrflip:
        data = np.fliplr(data)

    data = make_hollow(data)  # Remove voxels that aren't visible

    # Viewing angle
    views = {
        'orig': (30, 320),
        'left': (0, 0),
        'right': (0, 180),
        'superior': (90, 90),
        'inferior': (270, 90),
        'anterior': (0, -90),
        'posterior': (0, -270)
    }

    if not skew:
        skew = (10, -10) if view in ('inferior', 'posterior') else (10, 10)

    elev, azim = tuple(map(sum, zip(views[view], skew)))

    # Colors
    face_palette = sns.color_palette(facecolor).as_hex()
    edge_palette = sns.color_palette(edgecolor).as_hex()
    facecolors = np.empty(data.shape, dtype=object)
    edgecolors = np.empty(data.shape, dtype=object)

    for i, k in enumerate(np.unique(data)[1:]):
        facecolors[data == k] = face_palette[i]
        edgecolors[data == k] = edge_palette[i]

    # Plotting
    plt.ioff()
    fig = plt.figure(figsize=(30 / 2.54, 30 / 2.54))
    fig.tight_layout()
    # add_subplot works across Matplotlib versions; gca(projection=...) no
    # longer accepts keyword arguments
    ax = fig.add_subplot(projection='3d')
    ax.view_init(elev, azim)
    ax.voxels(data, facecolors=facecolors, edgecolors=edgecolors)
    dim = np.max(data.shape) / 2
    ax.set_xlim(right=dim * 2)
    ax.set_ylim(top=dim * 2)
    ax.set_zlim(top=dim * 2)
    plt.axis('off')
    fig.savefig(out_file, transparent=True, bbox_inches='tight', pad_inches=0)
    plt.close(fig)
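A hedged usage sketch, assuming a small labelled 3D array (cluster ids starting at 0 for background) and the module's make_hollow helper being importable:

import numpy as np

labels = np.zeros((10, 10, 10), dtype=int)
labels[2:5, 2:5, 2:5] = 1   # cluster 1
labels[6:9, 6:9, 6:9] = 2   # cluster 2
plot_volumetric_roi(labels, 'roi.svg', view='left')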
Code example #42
File: nifti_writer.py  Project: walehn/MONAI
def write_nifti(
    data: np.ndarray,
    file_name: str,
    affine: Optional[np.ndarray] = None,
    target_affine: Optional[np.ndarray] = None,
    resample: bool = True,
    output_spatial_shape: Optional[Sequence[int]] = None,
    mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
    padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
    align_corners: bool = False,
    dtype: Optional[np.dtype] = np.float64,
    output_dtype: Optional[np.dtype] = np.float32,
) -> None:
    """
    Write numpy data into NIfTI files to disk.  This function converts data
    into the coordinate system defined by `target_affine` when `target_affine`
    is specified.

    If the coordinate transform between `affine` and `target_affine` could be
    achieved by simply transposing and flipping `data`, no resampling will
    happen.  Otherwise this function will resample `data` using the coordinate
    transform computed from `affine` and `target_affine`.  Note that the shape
    of the resampled `data` may be subject to some rounding errors. For example,
    resampling a 20x20-pixel image from pixel size (1.5, 1.5)-mm to (3.0,
    3.0)-mm space will return a 10x10-pixel image.  However, resampling a
    20x20-pixel image from pixel size (2.0, 2.0)-mm to (3.0, 3.0)-mm space
    will output a 14x14-pixel image, where the image shape is rounded from
    13.333x13.333 pixels. In this case `output_spatial_shape` could be specified so
    that this function writes image data to a designated shape.

    When `affine` and `target_affine` are None, the data will be saved with an
    identity matrix as the image affine.

    This function assumes the NIfTI dimension notations.
    Spatially it supports up to three dimensions, that is, H, HW, HWD for
    1D, 2D, 3D respectively.
    When saving multiple time steps or multiple channels `data`, time and/or
    modality axes should be appended after the first three dimensions.  For
    example, shape of 2D eight-class segmentation probabilities to be saved
    could be `(64, 64, 1, 8)`. Also, data in shape (64, 64, 8), (64, 64, 8, 1)
    will be considered as a single-channel 3D image.

    Args:
        data: input data to write to file.
        file_name: expected file name that saved on disk.
        affine: the current affine of `data`. Defaults to `np.eye(4)`
        target_affine: before saving
            the (`data`, `affine`) as a Nifti1Image,
            transform the data into the coordinates defined by `target_affine`.
        resample: whether to run resampling when the target affine
            could not be achieved by swapping/flipping data axes.
        output_spatial_shape: spatial shape of the output image.
            This option is used when resample = True.
        mode: {``"bilinear"``, ``"nearest"``}
            This option is used when ``resample = True``.
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            This option is used when ``resample = True``.
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
            If None, use the data type of input data. To be compatible with other modules,
            the output data type is always ``np.float32``.
        output_dtype: data type for saving data. Defaults to ``np.float32``.
    """
    if not isinstance(data, np.ndarray):
        raise AssertionError("input data must be numpy array.")
    dtype = dtype or data.dtype
    sr = min(data.ndim, 3)
    if affine is None:
        affine = np.eye(4, dtype=np.float64)
    affine = to_affine_nd(sr, affine)

    if target_affine is None:
        target_affine = affine
    target_affine = to_affine_nd(sr, target_affine)

    if np.allclose(affine, target_affine, atol=1e-3):
        # no affine changes, save (data, affine)
        results_img = nib.Nifti1Image(data.astype(output_dtype),
                                      to_affine_nd(3, target_affine))
        nib.save(results_img, file_name)
        return

    # resolve orientation
    start_ornt = nib.orientations.io_orientation(affine)
    target_ornt = nib.orientations.io_orientation(target_affine)
    ornt_transform = nib.orientations.ornt_transform(start_ornt, target_ornt)
    data_shape = data.shape
    data = nib.orientations.apply_orientation(data, ornt_transform)
    _affine = affine @ nib.orientations.inv_ornt_aff(ornt_transform,
                                                     data_shape)
    if np.allclose(_affine, target_affine, atol=1e-3) or not resample:
        results_img = nib.Nifti1Image(data.astype(output_dtype),
                                      to_affine_nd(3, target_affine))
        nib.save(results_img, file_name)
        return

    # need resampling
    affine_xform = AffineTransform(normalized=False,
                                   mode=mode,
                                   padding_mode=padding_mode,
                                   align_corners=align_corners,
                                   reverse_indexing=True)
    transform = np.linalg.inv(_affine) @ target_affine
    if output_spatial_shape is None:
        output_spatial_shape, _ = compute_shape_offset(data.shape, _affine,
                                                       target_affine)
    output_spatial_shape_ = list(output_spatial_shape)
    if data.ndim > 3:  # multi channel, resampling each channel
        while len(output_spatial_shape_) < 3:
            output_spatial_shape_ = output_spatial_shape_ + [1]
        spatial_shape, channel_shape = data.shape[:3], data.shape[3:]
        data_np = data.reshape(list(spatial_shape) + [-1])
        data_np = np.moveaxis(data_np, -1, 0)  # channel first for pytorch
        data_torch = affine_xform(
            torch.as_tensor(
                np.ascontiguousarray(data_np).astype(dtype)).unsqueeze(0),
            torch.as_tensor(np.ascontiguousarray(transform).astype(dtype)),
            spatial_size=output_spatial_shape_[:3],
        )
        data_np = data_torch.squeeze(0).detach().cpu().numpy()
        data_np = np.moveaxis(data_np, 0, -1)  # channel last for nifti
        data_np = data_np.reshape(
            list(data_np.shape[:3]) + list(channel_shape))
    else:  # single channel image, need to expand to have batch and channel
        while len(output_spatial_shape_) < len(data.shape):
            output_spatial_shape_ = output_spatial_shape_ + [1]
        data_torch = affine_xform(
            torch.as_tensor(
                np.ascontiguousarray(data).astype(dtype)[None, None]),
            torch.as_tensor(np.ascontiguousarray(transform).astype(dtype)),
            spatial_size=output_spatial_shape_[:len(data.shape)],
        )
        data_np = data_torch.squeeze(0).squeeze(0).detach().cpu().numpy()

    results_img = nib.Nifti1Image(data_np.astype(output_dtype),
                                  to_affine_nd(3, target_affine))
    nib.save(results_img, file_name)
    return
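A minimal call, assuming MONAI's module-level helpers (nib, to_affine_nd, AffineTransform, compute_shape_offset) are importable: with both affines left as None, the volume is saved with an identity affine and no resampling:

import numpy as np

volume = np.random.rand(64, 64, 40).astype(np.float32)
write_nifti(volume, "volume.nii.gz")  # saved with np.eye(4) as the affine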
Code example #43
    def _generate_bss(
            self, x_batch: np.ndarray, y_batch: np.ndarray,
            c_batch: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Generate adversarial examples for a batch of inputs with a specific batch of constants.

        :param x_batch: A batch of original examples.
        :param y_batch: A batch of targets (0-1 hot).
        :param c_batch: A batch of constants.
        :return: A tuple of best elastic distances, best labels, best attacks.
        """
        def compare(object1, object2):
            return object1 == object2 if self.targeted else object1 != object2

        x_orig = x_batch.astype(ART_NUMPY_DTYPE)
        fine_tuning = np.full(x_batch.shape[0], False, dtype=bool)
        prev_loss = 1e6 * np.ones(x_batch.shape[0])
        prev_l2dist = np.zeros(x_batch.shape[0])

        # Resize and initialize Adam
        if self.use_resize:
            x_orig = self._resize_image(x_orig, self._init_size,
                                        self._init_size, True)
            assert (x_orig != 0).any()
            x_adv = x_orig.copy()
        else:
            x_orig = x_batch
            self._reset_adam(np.prod(self.estimator.input_shape).item())
            if x_batch.shape == self._current_noise.shape:
                self._current_noise.fill(0)
            else:
                self._current_noise = np.zeros(x_batch.shape,
                                               dtype=ART_NUMPY_DTYPE)
            x_adv = x_orig.copy()

        # Initialize best distortions, best changed labels and best attacks
        best_dist = np.inf * np.ones(x_adv.shape[0])
        best_label = -np.inf * np.ones(x_adv.shape[0])
        best_attack = np.array([x_adv[i] for i in range(x_adv.shape[0])])

        for iter_ in range(self.max_iter):
            logger.debug("Iteration step %i out of %i", iter_, self.max_iter)

            # Upscaling for very large number of iterations
            if self.use_resize:
                if iter_ == 2000:
                    x_adv = self._resize_image(x_adv, 64, 64)
                    x_orig = zoom(
                        x_orig,
                        [
                            1,
                            x_adv.shape[1] / x_orig.shape[1],
                            x_adv.shape[2] / x_orig.shape[2],
                            x_adv.shape[3] / x_orig.shape[3],
                        ],
                    )
                elif iter_ == 10000:
                    x_adv = self._resize_image(x_adv, 128, 128)
                    x_orig = zoom(
                        x_orig,
                        [
                            1,
                            x_adv.shape[1] / x_orig.shape[1],
                            x_adv.shape[2] / x_orig.shape[2],
                            x_adv.shape[3] / x_orig.shape[3],
                        ],
                    )

            # Compute adversarial examples and loss
            x_adv = self._optimizer(x_adv, y_batch, c_batch)
            preds, l2dist, loss = self._loss(x_orig, x_adv, y_batch, c_batch)

            # Reset Adam if a valid example has been found to avoid overshoot
            mask_fine_tune = (~fine_tuning) & (loss == l2dist) & (prev_loss !=
                                                                  prev_l2dist)
            fine_tuning[mask_fine_tune] = True
            self._reset_adam(self.adam_mean.size,
                             np.repeat(mask_fine_tune,
                                       x_adv[0].size))  # type: ignore
            prev_l2dist = l2dist

            # Abort early if no improvement is obtained
            if self.abort_early and iter_ % self._early_stop_iters == 0:
                if (loss > 0.9999 * prev_loss).all():
                    break
                prev_loss = loss

            # Adjust the best result
            labels_batch = np.argmax(y_batch, axis=1)
            for i, (dist,
                    pred) in enumerate(zip(l2dist, np.argmax(preds, axis=1))):
                if dist < best_dist[i] and compare(pred, labels_batch[i]):
                    best_dist[i] = dist
                    best_attack[i] = x_adv[i]
                    best_label[i] = pred

        # Resize images to original size before returning
        best_attack = np.array(best_attack)
        if self.use_resize:
            if not self.estimator.channels_first:
                best_attack = zoom(
                    best_attack,
                    [
                        1,
                        int(x_batch.shape[1]) / best_attack.shape[1],
                        int(x_batch.shape[2]) / best_attack.shape[2],
                        1,
                    ],
                )
            else:
                best_attack = zoom(
                    best_attack,
                    [
                        1,
                        1,
                        int(x_batch.shape[2]) / best_attack.shape[2],
                        int(x_batch.shape[2]) / best_attack.shape[3],
                    ],
                )

        return best_dist, best_label, best_attack
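The resize and Adam bookkeeping above matches ART's ZooAttack. A hedged usage sketch under that assumption; `classifier` and `x_test` are placeholders for an ART classifier and an input batch:

from art.attacks.evasion import ZooAttack

attack = ZooAttack(classifier=classifier, max_iter=20, use_resize=False, batch_size=32)
x_adv = attack.generate(x=x_test)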
Code example #44
 def fit_transform_params(column: np.ndarray, backend: "Backend") -> dict:  # noqa
     compute = backend.df_engine.compute
     return {
         "mean": compute(column.astype(np.float32).mean()),
         "std": compute(column.astype(np.float32).std()),
     }
Code example #45
File: profile.py  Project: jrkerns/pylinac
def peak_detect(values: np.ndarray, threshold: Union[float, int]=None, min_distance: Union[float, int]=10,
                max_number: int=None, search_region: Tuple[float, float]=(0.0, 1.0),
                find_min_instead: bool=False) -> Tuple[np.ndarray, np.ndarray]:
    """Find the peaks or valleys of a 1D signal.

    Uses the difference (np.diff) in signal to find peaks. Current limitations include:
        1) Only for use in 1-D data; 2D may be possible with the gradient function.
        2) Will not detect peaks at the very edge of array (i.e. 0 or -1 index)

    Parameters
    ----------
    values : array-like
        Signal values to search for peaks within.
    threshold : int, float
        The value the peak must be above to be considered a peak. This removes "peaks"
        that are in a low-value region.
        If passed an int, the actual value is the threshold.
        E.g. when passed 15, any peak less with a value <15 is removed.
        If passed a float, it will threshold as a percent. Must be between 0 and 1.
        E.g. when passed 0.4, any peak <40% of the maximum value will be removed.
    min_distance : int, float
        If passed an int, parameter is the number of elements apart a peak must be from neighboring peaks.
        If passed a float, must be between 0 and 1 and represents the ratio of the profile to exclude.
        E.g. if passed 0.05 with a 1000-element profile, the minimum peak width will be 0.05*1000 = 50 elements.
    max_number : int
        Specify up to how many peaks will be returned. E.g. if 3 is passed in and 5 peaks are found, only the 3 largest
        peaks will be returned.
    search_region : tuple
        The (left, right) bounds of the search: fractions of the profile length if floats
        (e.g. (0.0, 1.0) searches the whole profile), or element indices if ints.
    find_min_instead : bool
        If False (default), peaks will be returned.
        If True, valleys will be returned.

    Returns
    -------
    max_vals : numpy.array
        The values of the peaks found.
    max_idxs : numpy.array
        The x-indices (locations) of the peaks.

    Raises
    ------
    ValueError
        If float not between 0 and 1 passed to threshold.
    """
    peak_vals = []  # a list to hold the y-values of the peaks. Will be converted to a numpy array
    peak_idxs = []  # ditto for x-values (index) of y data.

    if find_min_instead:
        values = -values

    """Limit search to search region"""
    left_end = search_region[0]
    if is_float_like(left_end):
        left_index = int(left_end*len(values))
    elif is_int_like(left_end):
        left_index = left_end
    else:
        raise ValueError(f"{left_end} must be a float or int")

    right_end = search_region[1]
    if is_float_like(right_end):
        right_index = int(right_end * len(values))
    elif is_int_like(right_end):
        right_index = right_end
    else:
        raise ValueError(f"{right_end} must be a float or int")

    # minimum peak spacing calc
    if isinstance(min_distance, float):
        # a float min_distance is a ratio and must lie strictly between 0 and 1
        if not 0 < min_distance < 1:
            raise ValueError("When min_peak_width is passed a float, value must be between 0 and 1")
        else:
            min_distance = int(min_distance * len(values))

    values = values[left_index:right_index]

    """Determine threshold value"""
    if isinstance(threshold, float) and threshold < 1:
        data_range = values.max() - values.min()
        threshold = threshold * data_range + values.min()
    elif isinstance(threshold, float) and threshold >= 1:
        raise ValueError("When threshold is passed a float, value must be less than 1")
    elif threshold is None:
        threshold = values.min()

    """Take difference"""
    values_diff = np.diff(values.astype(float))  # y and y_diff must be converted to signed type.

    """Find all potential peaks"""
    for idx in range(len(values_diff) - 1):
        # For each item of the diff array, check if:
        # 1) The y-value is above the threshold.
        # 2) The value of y_diff is positive (negative for valley search), it means the y-value changed upward.
        # 3) The next y_diff value is zero or negative (or positive for valley search); a positive-then-negative diff value means the value
        # is a peak of some kind. If the diff is zero it could be a flat peak, which still counts.

        # 1)
        if values[idx + 1] < threshold:
            continue

        y1_gradient = values_diff[idx] > 0
        y2_gradient = values_diff[idx + 1] <= 0

        # 2) & 3)
        if y1_gradient and y2_gradient:
            # If the next value isn't zero it's a single-pixel peak. Easy enough.
            if values_diff[idx + 1] != 0:
                peak_vals.append(values[idx + 1])
                peak_idxs.append(idx + 1 + left_index)
            # elif idx >= len(y_diff) - 1:
            #     pass
            # Else if the diff value is zero, it could be a flat peak, or it could keep going up; we don't know yet.
            else:
                # Continue on until we find the next nonzero diff value.
                try:
                    shift = 0
                    while values_diff[(idx + 1) + shift] == 0:
                        shift += 1
                        if (idx + 1 + shift) >= (len(values_diff) - 1):
                            break
                    # If the next diff is negative (or positive for min), we've found a peak. Also put the peak at the center of the flat
                    # region.
                    is_a_peak = values_diff[(idx + 1) + shift] < 0
                    if is_a_peak:
                        peak_vals.append(values[int((idx + 1) + np.round(shift / 2))])
                        peak_idxs.append((idx + 1 + left_index) + np.round(shift / 2))
                except IndexError:
                    pass

    # convert to numpy arrays
    peak_vals = np.array(peak_vals)
    peak_idxs = np.array(peak_idxs)

    """Enforce the min_peak_distance by removing smaller peaks."""
    # For each peak, determine if the next peak is within the min peak width range.
    index = 0
    while index < len(peak_idxs) - 1:

        # If the second peak is closer than min_peak_distance to the first peak, find the larger peak and remove the other one.
        if peak_idxs[index] > peak_idxs[index + 1] - min_distance:
            if peak_vals[index] > peak_vals[index + 1]:
                idx2del = index + 1
            else:
                idx2del = index
            peak_vals = np.delete(peak_vals, idx2del)
            peak_idxs = np.delete(peak_idxs, idx2del)
        else:
            index += 1

    """If Maximum Number passed, return only up to number given based on a sort of peak values."""
    if max_number is not None and len(peak_idxs) > max_number:
        sorted_peak_vals = peak_vals.argsort()  # sorts low to high
        peak_vals = peak_vals[sorted_peak_vals[-max_number:]]
        peak_idxs = peak_idxs[sorted_peak_vals[-max_number:]]

    # If we were looking for minimums, convert the values back to the original sign
    if find_min_instead:
        peak_vals = -peak_vals

    return peak_vals, peak_idxs
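A usage sketch on a synthetic two-peak signal, assuming the module helpers it relies on (is_float_like, is_int_like) are importable:

import numpy as np

x = np.linspace(0, 4 * np.pi, 500)
signal = np.sin(x)
vals, idxs = peak_detect(signal, threshold=0.5, min_distance=50)
# Expect two peaks, near indices 62 and 312 (x = pi/2 and 5*pi/2)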
Code example #46
File: activation_ops.py  Project: syyxsxx/openvino
 def elu(values: np.ndarray, alpha: float):
     values = values.astype(float)
     for index, x in np.ndenumerate(values):
         if x < 0:
             values[index] = alpha * (np.exp(x) - 1)
     return values
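An equivalent vectorised form (the function name is mine); np.expm1 computes exp(x) - 1, and the np.minimum guard avoids overflow warnings from evaluating the exponential on large positive entries:

import numpy as np

def elu_vec(values: np.ndarray, alpha: float) -> np.ndarray:
    values = values.astype(float)
    return np.where(values < 0, alpha * np.expm1(np.minimum(values, 0.0)), values)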
Code example #47
File: array.py  Project: oldMarcosVillacanas/MONAI
 def __call__(self, img: np.ndarray):
     assert isinstance(img, np.ndarray), "image must be numpy array."
     return img.astype(self.dtype)
Code example #48
File: image.py  Project: der-Daniel/Bachelor-Thesis
def diff(img1: np.ndarray, img2: np.ndarray) -> np.ndarray:
	# Cast to a signed type first so the subtraction cannot wrap around uint8
	img1_ = img1.astype(int)
	img2_ = img2.astype(int)
	diff = img1_ - img2_
	return (np.abs(diff)).astype('uint8')
Code example #49
def structural_similarity(array1: np.ndarray, array2: np.ndarray, filter_size: int = 40, filter_sigma: float = 1., \
                        k1: float = 0.01, k2: float = 0.03, max_val: int = 255) -> (np.float64, np.ndarray):
    """
    Compares two given arrays with the Structural Similarity (SSIM) index method.

    References
    ----------
    Zhou Wang et al: https://github.com/obartra/ssim/blob/master/assets/ssim.pdf
    https://en.wikipedia.org/wiki/Structural_similarity
    https://scikit-image.org/docs/dev/auto_examples/transform/plot_ssim.html
    https://blog.csdn.net/weixin_42096901/article/details/90172534
    https://github.com/tensorflow/models/blob/master/research/compression/image_encoder/msssim.py

    Parameters
    ----------
    array1  numpy.ndarray  array to compare against the other given array
    array2  numpy.ndarray  array to compare against the other given array
    filter_size  int  gaussian kernel size
    filter_sigma  float  gaussian kernel intensity
    k1  float  default value
    k2  float  default value
    max_val  int  dynamic range of the image  255 for 8-bit  65535 for 16-bit

    Raises
    ------
    ValueError  if the given arrays do not have the same shape (height, width, channels)

    Returns
    -------
    mssim  numpy.float64  mean of the contrast sensitivity  number between -1 and 1
    ssim  numpy.ndarray  array (map) of the contrast sensitivity
    """
    if array1.shape != array2.shape:
        msg = 'Input arrays must have the same shape'
        raise ValueError(msg)

    array1 = array1.astype(np.float64)
    array2 = array2.astype(np.float64)
    height, width = array1.shape[:2]

    if filter_size:  # is 1 or more
        # filter size can't be larger than height or width of arrays.
        size = min(filter_size, height, width)

        # scale down sigma if a smaller filter size is used.
        sigma = size * filter_sigma / filter_size if filter_size else 0
        window = gaussian_kernel(shape=(size, ), sigma=(sigma, ))
        # convolve = convolve_array
        # compute weighted means
        mu1 = convolve_array(array1, window)
        mu2 = convolve_array(array2, window)

        # compute weighted covariances
        sigma_11 = convolve_array(np.multiply(array1, array1), window)
        sigma_22 = convolve_array(np.multiply(array2, array2), window)
        sigma_12 = convolve_array(np.multiply(array1, array2), window)
    else:  # Empty blur kernel so no need to convolve.
        mu1, mu2 = array1, array2
        sigma_11 = np.multiply(array1, array1)
        sigma_22 = np.multiply(array2, array2)
        sigma_12 = np.multiply(array1, array2)

    # compute weighted variances
    mu_11 = np.multiply(mu1, mu1)
    mu_22 = np.multiply(mu2, mu2)
    mu_12 = np.multiply(mu1, mu2)
    sigma_11 = np.subtract(sigma_11, mu_11)
    sigma_22 = np.subtract(sigma_22, mu_22)
    sigma_12 = np.subtract(sigma_12, mu_12)

    # constants to avoid numerical instabilities close to zero
    c1 = (k1 * max_val)**2.
    c2 = (k2 * max_val)**2.
    v1 = 2.0 * sigma_12 + c2
    v2 = sigma_11 + sigma_22 + c2

    # Numerator of SSIM
    num_ssim = (2. * mu_12 + c1) * v1  # -> np.ndarray

    # Denominator of SSIM
    den_ssim = (mu_11 + mu_22 + c1) * v2  # -> np.ndarray

    # SSIM (contrast sensitivity)
    ssim = num_ssim / den_ssim  # -> np.ndarray

    # MeanSSIM
    mssim = np.mean(ssim)  # -> np.float64
    return mssim, ssim  # -> (np.float64, np.ndarray)
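A sanity check, assuming the module's gaussian_kernel and convolve_array helpers are on the path: comparing an image with itself yields an SSIM of 1 everywhere:

import numpy as np

img = (np.random.rand(64, 64) * 255).astype(np.float64)
mssim, ssim_map = structural_similarity(img, img)
print(mssim)  # 1.0 (up to floating-point error)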
Code example #50
File: matchmaker.py  Project: pombredanne/matchmaker
def linear_sum_assignment_iter(cost_matrix: np.ndarray):
    """Iterate over the solutions to the linear sum assignment problem
    in increasing order of cost

    The method used for the first solution is the Hungarian algorithm,
    also known as the Munkres or Kuhn-Munkres algorithm.
    
    The method used to find the second best solution and iterate over
    the solutions is described in [1]_, but is implemented in a slightly
    different, Dijkstra-like way. The states are represented as

    .. math:: (cost(N_k), r, M_k, N_k, I_k, O_k)

    with :math:`r` a random number used to avoid comparing the assignments.

    This function can also solve a generalization of the classic assignment
    problem where the cost matrix is rectangular. If it has more rows than
    columns, then not every row needs to be assigned to a column, and vice
    versa.
    It supports infinite weights to represent edges that must never be
    used.

    Parameters
    ----------
    cost_matrix : array
        The cost matrix of the bipartite graph.

    Yields
    -------
    row_ind, col_ind : array
        An array of row indices and one of corresponding column indices giving
        the optimal assignment. The cost of the assignment can be computed
        as ``cost_matrix[row_ind, col_ind].sum()``. The row indices will be
        sorted; in the case of a square cost matrix they will be equal to
        ``numpy.arange(cost_matrix.shape[0])``.
    
    Examples
    --------
    >>> cost = np.array([[4, 1, 3], [2, 0, float("inf")], [3, 2, 2]])
    >>> from matchmaker import linear_sum_assignment_iter
    >>> it = linear_sum_assignment_iter(cost)
    >>> row_ind, col_ind = next(it)
    >>> col_ind
    array([1, 0, 2])
    >>> cost[row_ind, col_ind].sum()
    5.0
    >>> row_ind, col_ind = next(it)
    >>> col_ind
    array([0, 1, 2])
    >>> cost[row_ind, col_ind].sum()
    6.0

    References
    ----------

    .. [1] Chegireddy, Chandra R., and Horst W. Hamacher. "Algorithms for finding
       k-best perfect matchings." Discrete applied mathematics 18, no. 2
       (1987): 155-165.

    """
    cost_matrix = np.asarray(cost_matrix)

    # make the cost_matrix square as the algorithm only works
    # for perfect matchings
    # any value other than 0 would work
    # see <https://cstheory.stackexchange.com/a/42168/43172>
    n, m = cost_matrix.shape
    if n < m:
        cost_matrix = np.concatenate(
            (cost_matrix, np.zeros((m - n, m), dtype=cost_matrix.dtype)), axis=0
        )
    elif n > m:
        cost_matrix = np.concatenate(
            (cost_matrix, np.zeros((n, n - m), dtype=cost_matrix.dtype)), axis=1
        )

    def transform(a, b):
        """transforms a solution assignment (a, b)
        back to the original matrix
        """
        mask = (a < n) & (b < m)
        return a[mask], b[mask]

    cost = lambda assignment: cost_matrix[assignment].sum()

    # linear_sum_assignment doesn't require the matrix to be square,
    # but second_best_assignment needs the best solution for a square matrix
    M1 = linear_sum_assignment(cost_matrix_without_inf(cost_matrix))
    if not np.isposinf(cost(M1)):
        yield transform(*M1)
    else:
        return

    # from now, use a copy of cost_matrix
    # with dtype float
    cost_matrix = cost_matrix.astype(float)

    I1 = []
    O1 = []
    N1 = _second_best_assignment(cost_matrix, M1, I1, O1)
    if N1 is None:
        return
    Q = [(cost(N1), np.random.rand(), M1, N1, I1, O1)]
    while Q:
        _, _, M, N, I, O = heappop(Q)
        yield transform(*N)

        e = _choose_in_difference(M, N)

        Ip, Op = I + [e], O
        Np = _second_best_assignment(cost_matrix, M, Ip, Op)
        if Np is not None:
            heappush(Q, (cost(Np), np.random.rand(), M, Np, Ip, Op))

        Ik, Ok = I, O + [e]
        Nk = _second_best_assignment(cost_matrix, N, Ik, Ok)
        if Nk is not None:
            heappush(Q, (cost(Nk), np.random.rand(), N, Nk, Ik, Ok))