Code example #1
File: copynet.py Project: kushalarora/nlp-models
    def _get_predicted_tokens(self,
                              predicted_indices: numpy.ndarray,
                              batch_metadata: List[Any],
                              n_best: int = None) -> List[Union[List[List[str]], List[str]]]:
        # Move the predictions to the CPU as a numpy array if they are still a torch tensor.
        if not isinstance(predicted_indices, numpy.ndarray):
            predicted_indices = predicted_indices.detach().cpu().numpy()
        predicted_tokens: List[Union[List[List[str]], List[str]]] = []
        for top_k_predictions, metadata in zip(predicted_indices, batch_metadata):
            batch_predicted_tokens: List[List[str]] = []
            for indices in top_k_predictions[:n_best]:
                tokens: List[str] = []
                indices = list(indices)
                # Truncate the prediction at the first end symbol.
                if self._end_index in indices:
                    indices = indices[:indices.index(self._end_index)]
                for index in indices:
                    if index >= self._target_vocab_size:
                        # Indices beyond the target vocabulary refer to copied source
                        # tokens (copy mechanism): map them back to the source sentence.
                        adjusted_index = index - self._target_vocab_size
                        token = metadata["source_tokens"][adjusted_index]
                    else:
                        token = self.vocab.get_token_from_index(index, self._target_namespace)
                    tokens.append(token)
                batch_predicted_tokens.append(tokens)
            if n_best == 1:
                predicted_tokens.append(batch_predicted_tokens[0])
            else:
                predicted_tokens.append(batch_predicted_tokens)
        return predicted_tokens
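To make the index convention above concrete, here is a minimal, self-contained sketch (my own illustration, not taken from the project): any predicted index at or above the target vocabulary size is treated as a pointer into the source sentence, which is how the copy mechanism recovers out-of-vocabulary tokens.

# Hypothetical illustration of the copy-mechanism index convention assumed above.
target_vocab = ["@@PADDING@@", "@start@", "@end@", "the", "cat"]  # indices 0..4
target_vocab_size = len(target_vocab)                             # 5
source_tokens = ["Felix", "sat", "down"]                          # tokens available to copy

predicted = [3, 5, 6, 4, 2]  # a mix of vocabulary indices and copy indices

tokens = []
for index in predicted:
    if index >= target_vocab_size:
        # Copy: the offset beyond the vocabulary selects a source position.
        tokens.append(source_tokens[index - target_vocab_size])
    else:
        tokens.append(target_vocab[index])

print(tokens)  # ['the', 'Felix', 'sat', 'cat', '@end@']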
Code example #2
File: metrics.py Project: shuns0314/cloud
    def channel_post_process(self,
                             mask: np.ndarray,
                             thresholds: List[float],
                             min_sizes: List[int],
                             mask_threshold: float):
        """Run post_process on every channel.

        :arguments:
        mask: [batch, channel, h, w]
        """
        mask = mask.detach().cpu().numpy()
        pred_mask = np.zeros(mask.shape)

        for batch in range(mask.shape[0]):
            for channel in range(mask.shape[1]):
                # Binarise with the channel's threshold and drop predicted regions
                # whose area is below the channel's minimum size.
                mask_p, _, _ = self.post_process(mask[batch, channel, :, :],
                                                 thresholds[channel],
                                                 min_sizes[channel],
                                                 batch, channel)
                # Fill the bounding box of each predicted region with 1.
                # findContours returns (contours, hierarchy) in OpenCV 4.x and
                # (image, contours, hierarchy) in 3.x, so take the second-to-last value.
                contours = cv2.findContours(mask_p.astype('uint8'),
                                            cv2.RETR_LIST,
                                            cv2.CHAIN_APPROX_SIMPLE)[-2]
                for cont in contours:
                    x, y, w, h = cv2.boundingRect(cont)
                    pred_mask[batch, channel, y:y+h, x:x+w] = 1

        # Even inside the bounding boxes, zero out locations with a low predicted score.
        pred_mask = np.where(mask < mask_threshold, 0, pred_mask)

        return pred_mask
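For context, the following is a minimal, self-contained sketch (my own, not from the project) of the bounding-box step used above: binarise a probability map, extract contours, and fill each contour's bounding rectangle with 1.

import cv2
import numpy as np

# Toy single-channel probability map with one confident blob.
prob = np.zeros((64, 64), dtype=np.float32)
prob[10:20, 10:30] = 0.9

binary = (prob > 0.5).astype(np.uint8)
# Version-agnostic contour extraction: contours are the second-to-last return value.
contours = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]

boxed = np.zeros_like(binary)
for cont in contours:
    x, y, w, h = cv2.boundingRect(cont)
    boxed[y:y + h, x:x + w] = 1  # fill the bounding rectangle

print(boxed.sum())  # 200 == 10 * 20, the area of the filled box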
Code example #3
    def indices_to_tokens(self,
                          batch_indices: numpy.ndarray) -> List[List[str]]:

        if not isinstance(batch_indices, numpy.ndarray):
            batch_indices = batch_indices.detach().cpu().numpy()

        all_hypotheses = []
        for beam_indices in batch_indices:
            # Beam search gives us the top k results for each source sentence
            # in the batch; keep every hypothesis in the beam.
            hypotheses = []
            for indices in beam_indices:
                indices = list(indices)
                # Collect indices up to the first end symbol.
                if self._end_index in indices:
                    indices = indices[:indices.index(self._end_index)]
                tokens = [
                    self._vocab.get_token_from_index(
                        x, namespace=self._target_namespace) for x in indices
                ]
                hypotheses.append(tokens)
            all_hypotheses.append(hypotheses)

        return all_hypotheses
Code example #4
    def _transform_single_normal_deep_dream(self, stft: np.ndarray) -> np.ndarray:
        # Build a Laplacian-pyramid-style octave decomposition: repeatedly downscale
        # the spectrogram and store the high-frequency detail that each step loses.
        octaves = []
        for _ in range(self._n_octaves - 1):
            hw = stft.shape[:2]
            lo = \
                cv2.resize(stft,
                           tuple(np.int32(np.float32(hw[::-1]) / self._octave_scale)))[
                    ..., None]
            hi = stft - cv2.resize(lo, tuple(np.int32(hw[::-1])))[..., None]
            stft = lo
            octaves.append(hi)

        for octave in tqdm.trange(self._n_octaves, desc="Image optimisation"):
            if octave > 0:
                # Upscale the current result and add back the detail stored for
                # this octave before optimising at the higher resolution.
                hi = octaves[-octave]
                stft = cv2.resize(stft, tuple(np.int32(hi.shape[:2][::-1])))[
                           ..., None] + hi

            # Switch to a (channel, height, width) torch tensor for optimisation.
            stft = torch.from_numpy(stft).float()
            if self._use_gpu:
                stft = stft.cuda()
            stft = stft.permute((2, 0, 1))

            for _ in tqdm.trange(self._number_of_iterations, desc="Octave optimisation"):
                # Gradient ascent on the input, with the gradient normalised by its
                # mean absolute value so the step size is independent of its scale.
                g = self.calc_grad_tiled(stft)
                g /= (g.abs().mean() + 1e-8)
                g *= self._optimisation_step_size
                stft += g

            if self._use_gpu:
                stft = stft.cpu()

            # Back to a (height, width, channel) numpy array for the next octave.
            stft = stft.detach().numpy().transpose((1, 2, 0))

        return stft
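The key step in the octave loop is normalised gradient ascent on the input. Below is a minimal sketch of that step with a toy model; calc_grad_tiled in the project computes the gradient over tiles, while this illustration (model and step_size are my own names) uses a plain backward pass.

import torch

# Hypothetical stand-in for the tiled gradient computation: gradient of the
# mean activation of a small convolution with respect to the input.
model = torch.nn.Conv2d(1, 8, kernel_size=3, padding=1)
x = torch.rand(1, 1, 64, 64)
step_size = 0.05

for _ in range(10):
    x_var = x.clone().requires_grad_(True)
    model(x_var).mean().backward()       # activation to maximise
    g = x_var.grad
    g = g / (g.abs().mean() + 1e-8)      # normalise so the step is scale-invariant
    x = x + step_size * g                # gradient ascent on the input itself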
Code example #5
File: rollout_storage.py Project: tzadouri/genrl
    def to_torch(self, array: np.ndarray, copy: bool = True) -> torch.Tensor:
        """
        Convert a numpy array (or an existing tensor) to a PyTorch tensor.
        Note: it copies the data by default.
        :param array: (np.ndarray)
        :param copy: (bool) Whether or not to copy the data
            (may be useful to avoid changing things by reference)
        :return: (torch.Tensor)
        """
        if torch.is_tensor(array):
            # Already a tensor: detach from the graph and optionally copy.
            return array.detach().clone() if copy else array
        return torch.tensor(array) if copy else torch.as_tensor(array)
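The copy flag matters because a tensor created with torch.from_numpy shares memory with the numpy array, while torch.tensor makes an independent copy. A short, self-contained illustration:

import numpy as np
import torch

arr = np.zeros(3, dtype=np.float32)

shared = torch.from_numpy(arr)  # shares memory with arr
copied = torch.tensor(arr)      # independent copy

arr[0] = 1.0
print(shared[0].item())  # 1.0 -- follows the numpy array
print(copied[0].item())  # 0.0 -- unaffected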
Code example #6
    def save(self, data: np.ndarray, meta_data=None):
        """Save data into the cache dictionary. The metadata should have the following key:
            - ``'filename_or_obj'`` -- save the data corresponding to file name or object.
        If meta_data is None, use the default index from 0 to save data instead.

        Args:
            data (Tensor or ndarray): target data content that save into cache.
            meta_data (dict): the meta data information corresponding to the data.

        """
        save_key = meta_data["filename_or_obj"] if meta_data else str(self._data_index)
        self._data_index += 1
        if torch.is_tensor(data):
            data = data.detach().cpu().numpy()
        self._cache_dict[save_key] = data.astype(np.float32)
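The method above is tied to its class, but the caching pattern it implements is easy to show standalone. A minimal sketch (my own, with a module-level dict and counter standing in for the class attributes):

import numpy as np
import torch

cache = {}
data_index = 0

def save_to_cache(data, meta_data=None):
    """Key by the filename metadata when present, otherwise by a running index."""
    global data_index
    key = meta_data["filename_or_obj"] if meta_data else str(data_index)
    data_index += 1
    if torch.is_tensor(data):
        data = data.detach().cpu().numpy()
    cache[key] = data.astype(np.float32)

save_to_cache(torch.ones(2, 2), {"filename_or_obj": "img_001.nii"})
save_to_cache(np.zeros((2, 2)))
print(sorted(cache))  # ['1', 'img_001.nii']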
Code example #7
    def apply_to_tensor(self, tensor: Union[torch.Tensor, numpy.ndarray], *, name: str,
                        idx: int, meta: List[dict]):
        if isinstance(tensor, torch.Tensor):
            if self.device == "numpy":
                # Torch tensor -> numpy array on the CPU with the requested dtype.
                return tensor.detach().cpu().numpy().astype(self.dtype)
            else:
                return tensor.to(dtype=getattr(torch, self.dtype),
                                 device=torch.device(self.device),
                                 non_blocking=self.non_blocking)
        elif isinstance(tensor, numpy.ndarray):
            if self.device == "numpy":
                assert not self.non_blocking, "'non_blocking' not supported for numpy.ndarray"
                return tensor.astype(self.dtype, **self.numpy_kwargs)
            else:
                # Numpy array -> torch tensor on the requested device.
                return torch.from_numpy(tensor.astype(dtype=self.dtype)).to(
                    dtype=getattr(torch, self.dtype),
                    device=torch.device(self.device))
        else:
            raise NotImplementedError(type(tensor))
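Note the getattr(torch, self.dtype) trick above: a dtype given as a string such as "float32" resolves to the corresponding torch dtype object, and numpy accepts the same string directly. A short illustration:

import numpy as np
import torch

dtype_name = "float32"

torch_dtype = getattr(torch, dtype_name)  # torch.float32
arr = np.arange(4).astype(dtype_name)     # numpy accepts the dtype string directly

t = torch.from_numpy(arr).to(dtype=torch_dtype, device=torch.device("cpu"))
print(t.dtype)  # torch.float32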
Code example #8
File: save.py Project: ninfueng/ninpy
def np2cpp(
    array: np.ndarray,
    type_var: str,
    name_var: str,
    name_file: str,
    mode: str,
    header_guard: bool = True,
) -> None:
    """Convert Python 1-4 dimensional array into cpp array.
    TODO: Adding comments sections (adding string) at header file.
    Args:
        array: A Python numpy 1-4D array.
        type_var (str): A string with C++ type 'float', 'int', ..
        name_var (str): A string assigned to name of variable in C++ environment.
        name_file (str): A string assigned to name of C++ file.
        mode (str): A string assigned to mode of open(, ): 'w', 'a'.
        header_guard (bool): Generate header guard.
    Raise:
        NotImplementedError: For array more dimension than 4.
    """
    if isinstance(array, Tensor):
        array = array.detach().cpu().numpy()

    array = np.array(array)
    assert isinstance(type_var, str)
    assert isinstance(name_var, str)
    assert isinstance(name_file, str)
    assert isinstance(mode, str)
    assert isinstance(header_guard, bool)
    assert mode in ["w", "a"]

    # Use the bare file name (stem + suffix) for the header guard.
    name_stem = Path(name_file).name
    with open(name_file, mode) as file:
        if header_guard:
            # Open the include guard for the C++ header file.
            header_name = name_stem.upper()
            file.write(f"#ifndef __{header_name}__\n".replace(".", "_"))
            file.write(f"#define __{header_name}__\n".replace(".", "_"))
        file.write("\n")

        if len(array.shape) == 1:
            file.write(type_var + " " + name_var + str([array.shape[0]]) +
                       " {")
            for i in range(array.shape[0]):
                file.write(str(array[i]))
                file.write(",")

        elif len(array.shape) == 2:
            file.write(type_var + " " + name_var + str([array.shape[0]]) +
                       str([array.shape[1]]) + " {")
            for i in range(array.shape[0]):
                file.write("{")
                for j in range(array.shape[1]):
                    file.write(str(array[i][j]))
                    file.write(",")
                file.write("},")

        elif len(array.shape) == 3:
            file.write(type_var + " " + name_var + str([array.shape[0]]) +
                       str([array.shape[1]]) + str([array.shape[2]]) + " {")
            for i in range(array.shape[0]):
                file.write("{")
                for j in range(array.shape[1]):
                    file.write("{")
                    for k in range(array.shape[2]):
                        file.write(str(array[i][j][k]))
                        file.write(",")
                    file.write("},")
                file.write("},")

        elif len(array.shape) == 4:
            file.write(type_var + " " + name_var + str([array.shape[0]]) +
                       str([array.shape[1]]) + str([array.shape[2]]) +
                       str([array.shape[3]]) + " {")
            for i in range(array.shape[0]):
                file.write("{")
                for j in range(array.shape[1]):
                    file.write("{")
                    for k in range(array.shape[2]):
                        file.write("{")
                        for l in range(array.shape[3]):
                            file.write(str(array[i][j][k][l]))
                            file.write(",")
                        file.write("},")
                    file.write("},")
                file.write("},")

        else:
            raise NotImplementedError(
                "array can have 1 to 4 dimensions; "
                f"however, the input has {len(array.shape)} dimensions")

        file.write("};\n")
        if header_guard:
            # Close the include guard for the C++ header file.
            file.write("\n")
            file.write("#endif")