Code Example #1
    def _transform_single_normal_deep_dream(self, stft: np.ndarray) -> np.ndarray:
        octaves = []
        # Build a Laplacian-style pyramid: repeatedly downscale the STFT and
        # keep each octave's high-frequency residual for later reconstruction.
        for _ in range(self._n_octaves - 1):
            hw = stft.shape[:2]
            lo = cv2.resize(
                stft,
                tuple(np.int32(np.float32(hw[::-1]) / self._octave_scale)))[..., None]
            hi = stft - cv2.resize(lo, tuple(np.int32(hw[::-1])))[..., None]
            stft = lo
            octaves.append(hi)

        for octave in tqdm.trange(self._n_octaves, desc="Image optimisation"):
            if octave > 0:
                hi = octaves[-octave]
                stft = cv2.resize(
                    stft, tuple(np.int32(hi.shape[:2][::-1])))[..., None] + hi

            stft = torch.from_numpy(stft).float()
            if self._use_gpu:
                stft = stft.cuda()
            stft = stft.permute((2, 0, 1))

            for _ in tqdm.trange(self._number_of_iterations, desc="Octave optimisation"):
                g = self.calc_grad_tiled(stft)
                # Normalise the gradient and take one ascent step.
                g /= (g.abs().mean() + 1e-8)
                g *= self._optimisation_step_size
                stft += g

            if self._use_gpu:
                stft = stft.cpu()

            stft = stft.detach().numpy().transpose((1, 2, 0))

        return stft
Code Example #2
    def __call__(
        self,
        x: np.ndarray,
        y: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Apply preprocessing to input `x` and labels `y`.

        :param x: Sample to smooth with shape `(batch_size, width, height, depth)`.
        :param y: Labels of the sample `x`. This function does not affect them in any way.
        :return: Tuple of the smoothed sample and the unmodified labels.
        """
        import torch  # lgtm [py/repeated-import]

        x = torch.tensor(x, device=self.device)
        if y is not None:
            y = torch.tensor(y, device=self.device)

        with torch.no_grad():
            x, y = self.forward(x, y)

        result = x.cpu().numpy()
        if y is not None:
            y = y.cpu().numpy()
        return result, y
Code Example #3
def full_combination(x: torch.Tensor) -> np.ndarray:
    """Take the full product of the dimension-wise combinations of input data."""
    # All index tuples of length x.shape[1] drawn from range(x.shape[0]).
    perms = np.array(
        list(itertools.product(range(x.shape[0]), repeat=x.shape[1])))
    x_cpu = x.cpu().detach().numpy()
    return np.hstack(
        tuple(x_cpu[perms[:, d], d][:, None] for d in range(x.shape[1])))
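A minimal usage sketch: for a 2x2 input, the function enumerates all four dimension-wise combinations of values (the inputs below are illustrative).

import itertools

import numpy as np
import torch

x = torch.tensor([[0.0, 10.0],
                  [1.0, 11.0]])
print(full_combination(x))
# [[ 0. 10.]
#  [ 0. 11.]
#  [ 1. 10.]
#  [ 1. 11.]]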
Code Example #4
    def transform_ob(self, ob: np.ndarray) -> np.ndarray:
        ob = self.preprocess_ob(ob)
        with torch.no_grad():
            ob = torch.from_numpy(ob).float().to(self.device)
            ob = self.autoencoder.encode(ob)
            # Move the tensor back to host memory first; this is required
            # before converting a tensor that lives on a CUDA device.
            ob_cpu = ob.cpu().clone().numpy()
            del ob
            torch.cuda.empty_cache()
            return ob_cpu
Code Example #5
File: emulator.py Project: SensorsINI/v2e
    def _show(self, inp: torch.Tensor):
        inp = np.array(inp.cpu().data.numpy())
        # Avoid shadowing the built-in min().
        inp_min = np.min(inp)
        norm = np.max(inp) - inp_min
        if norm == 0:
            logger.warning('image is blank, max-min=0')
            norm = 1
        img = (inp - inp_min) / norm
        cv2.imshow(__name__ + ':' + self.show_input, img)
        cv2.waitKey(30)
Code Example #6
def calculate_metrics(gt_img: Union[np.ndarray, Tensor],
                      recon_img: Union[np.ndarray, Tensor],
                      verbose: bool = True) -> tuple:
    """
    Display PSNR, SSIM, SNR and MSE for reconstructed image
    against ground truth.

    Args:
        gt_img: ground truth image
        recon_img: reconstructed image
        verbose: whether to print the metrics or just return them
    Returns:
        tuple of metrics
    """
    assert gt_img.shape == recon_img.shape

    if isinstance(gt_img, Tensor):
        gt_img = gt_img.cpu().detach().numpy()
    if isinstance(recon_img, Tensor):
        recon_img = recon_img.cpu().detach().numpy()

    gt_img = np.array(gt_img, dtype=np.float64)
    recon_img = np.array(recon_img, dtype=np.float64)

    psnr = peak_signal_noise_ratio(
        gt_img, recon_img, data_range=recon_img.max() - recon_img.min())
    img_ssim = ssim(
        gt_img, recon_img, data_range=recon_img.max() - recon_img.min())
    snr = calculate_snr(gt_img, recon_img)
    mse = mean_squared_error(gt_img, recon_img)

    if verbose:
        print('============================')
        print(f'PSNR: {psnr}')
        print(f'SSIM: {img_ssim}')
        print(f'SNR: {snr}')
        print(f'MSE: {mse}')
        print('============================')

    return psnr, img_ssim, snr, mse
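A quick usage sketch. calculate_snr is a project-local helper that is not shown here, so the stand-in below (signal power over noise power, in dB) is an assumption, as are the skimage.metrics imports.

import numpy as np
from skimage.metrics import mean_squared_error, peak_signal_noise_ratio
from skimage.metrics import structural_similarity as ssim
from torch import Tensor

def calculate_snr(gt_img: np.ndarray, recon_img: np.ndarray) -> float:
    # Stand-in for the project's helper: 10 * log10(P_signal / P_noise).
    noise = gt_img - recon_img
    return 10 * np.log10(np.sum(gt_img ** 2) / np.sum(noise ** 2))

gt = np.random.rand(64, 64)
recon = gt + 0.05 * np.random.randn(64, 64)
psnr, img_ssim, snr, mse = calculate_metrics(gt, recon)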
Code Example #7
    def _apply_impl(self, image: np.ndarray, **kwargs):
        assert self._image_info.check_input_image(image)
        image = self._pre.scale_data(image)
        image = self._pre.force_dims(image)
        image = self._pre.grayscale(image)
        image = self._pre.scale_values(image)
        image = self._pre.torchify(image)
        image = self._pre.sobelize(image)
        # The NumPy copy is only used to validate the output shape.
        np_image = image.cpu().numpy().transpose(1, 2, 0)
        assert self._image_info.check_output_eval_image(np_image)
        return {"image": image, **kwargs}
Code Example #8
    def process(self, state: np.ndarray) -> torch.Tensor:
        Logger.debug(f"Processing.\nIn shape: {state.shape}")
        state = self.gray(state)
        state = self.resize(state, self.config.shrink_size)
        state = torch.from_numpy(state)
        state = self.stack(state)

        state = state.cpu().detach()

        Logger.debug(f"Out shape: {state.shape}")
        return state
Code Example #9
File: helpers.py Project: Borda/metrics
def _compute_sklearn_metric(
    preds: Union[Tensor, array],
    target: Union[Tensor, array],
    indexes: np.ndarray = None,
    metric: Callable = None,
    empty_target_action: str = "skip",
    ignore_index: int = None,
    reverse: bool = False,
    **kwargs,
) -> np.ndarray:
    """Compute metric with multiple iterations over every query predictions set."""

    if indexes is None:
        indexes = np.full_like(preds, fill_value=0, dtype=np.int64)
    if isinstance(indexes, Tensor):
        indexes = indexes.cpu().numpy()
    if isinstance(preds, Tensor):
        preds = preds.cpu().numpy()
    if isinstance(target, Tensor):
        target = target.cpu().numpy()

    assert isinstance(indexes, np.ndarray)
    assert isinstance(preds, np.ndarray)
    assert isinstance(target, np.ndarray)

    if ignore_index is not None:
        valid_positions = target != ignore_index
        indexes, preds, target = (indexes[valid_positions],
                                  preds[valid_positions],
                                  target[valid_positions])

    indexes = indexes.flatten()
    preds = preds.flatten()
    target = target.flatten()
    groups = get_group_indexes(indexes)

    sk_results = []
    for group in groups:
        trg, pds = target[group], preds[group]

        if ((1 - trg) if reverse else trg).sum() == 0:
            if empty_target_action == "skip":
                pass
            elif empty_target_action == "pos":
                sk_results.append(1.0)
            else:
                sk_results.append(0.0)
        else:
            res = metric(trg, pds, **kwargs)
            sk_results.append(res)

    if len(sk_results) > 0:
        return np.mean(sk_results)
    return np.array(0.0)
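A usage sketch with scikit-learn's average precision as the per-query metric (assuming get_group_indexes from the same helpers module is available; the tensors below are illustrative).

import torch
from sklearn.metrics import average_precision_score

indexes = torch.tensor([0, 0, 0, 1, 1, 1])  # two queries, three documents each
preds = torch.tensor([0.9, 0.2, 0.4, 0.1, 0.7, 0.8])
target = torch.tensor([1, 0, 0, 0, 1, 1])

mean_ap = _compute_sklearn_metric(
    preds, target, indexes=indexes, metric=average_precision_score)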
Code Example #10
def lab2rgb(L: torch.Tensor, ab: torch.Tensor) -> torch.Tensor:
    """
    Convert a Lab image to RGB, normalized to [0, 1].

    :param L: input L channel
    :param ab: input ab channels
    :return: converted image
    """
    L = L.cpu().numpy()
    ab = ab.cpu().detach().numpy()
    Lab = np.concatenate((L, ab), axis=1)
    Lab = np.transpose(Lab, (0, 2, 3, 1))
    B, W, H, C = Lab.shape
    # Reshape so the whole batch is converted without iteration.
    Lab = np.reshape(Lab, (B * W, H, C))
    # Rescale from [0, 1] to OpenCV's float Lab convention:
    # L in [0, 100], a/b shifted by -128.
    Lab *= 255
    Lab[:, :, 0] *= 100 / 255
    Lab[:, :, 1] -= 128
    Lab[:, :, 2] -= 128
    rgb = cv2.cvtColor(Lab, cv2.COLOR_LAB2RGB)
    rgb = np.reshape(rgb, (B, W, H, C))
    rgb = np.transpose(rgb, (0, 3, 1, 2))
    return torch.from_numpy(rgb)
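A shape-level smoke test (random values stand in for real network output; torch, numpy, and cv2 imports are assumed as above).

import torch

L = torch.rand(4, 1, 32, 32)   # L channel in [0, 1]
ab = torch.rand(4, 2, 32, 32)  # ab channels in [0, 1]
rgb = lab2rgb(L, ab)
print(rgb.shape)  # torch.Size([4, 3, 32, 32])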
Code Example #11
def calculate_matrix_column_distances(mat: torch.Tensor, idx_to_cls):
    mat = mat.cpu().detach().numpy()
    # initialize columns
    dist_mat = {'cls': [], 'idx': []}
    for idx in idx_to_cls:
        dist_mat[idx_to_cls[idx]] = []

    # for every row
    for i in tqdm.tqdm(range(mat.shape[0])):
        dist_mat['idx'].append(i)
        dist_mat['cls'].append(idx_to_cls[i])
        # for every column
        for j in range(mat.shape[0]):
            # calculate distance
            dist_mat[idx_to_cls[j]].append(
                spatial.distance.cosine(mat[i], mat[j]))
    return pd.DataFrame(dist_mat)
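A usage sketch (the embedding matrix and class mapping below are illustrative; pandas, scipy, and tqdm imports are assumed as in the snippet).

import torch

mat = torch.rand(3, 8)  # one embedding row per class
idx_to_cls = {0: 'cat', 1: 'dog', 2: 'bird'}
df = calculate_matrix_column_distances(mat, idx_to_cls)
print(df)  # a 3x3 table of pairwise cosine distances, plus 'cls'/'idx' columns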
Code Example #12
File: selectors.py Project: freefeynman123/Triplets
    def get_triplets(
            self,
            embeddings: torch.Tensor,
            labels: torch.Tensor
    ) -> torch.LongTensor:
        labels = labels.cpu().data.numpy()
        triplets = []
        for label in set(labels):
            label_mask = (labels == label)
            label_indices = np.where(label_mask)[0]
            if len(label_indices) < 2:
                continue
            negative_indices = np.where(np.logical_not(label_mask))[0]
            anchor_positives = list(combinations(label_indices, 2))  # All anchor-positive pairs

            # Add all negatives for all positive pairs
            temp_triplets = [[anchor_positive[0], anchor_positive[1], neg_ind]
                             for anchor_positive in anchor_positives
                             for neg_ind in negative_indices]
            triplets += temp_triplets

        return torch.LongTensor(np.array(triplets))
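A behaviour sketch, assuming this method lives on an all-triplets selector class; selector below stands for an instance of that (hypothetical) class. Embeddings are never touched by this variant, so None can be passed.

import torch

labels = torch.tensor([0, 0, 1, 1])  # two classes, two samples each
triplets = selector.get_triplets(embeddings=None, labels=labels)
print(triplets)
# tensor([[0, 1, 2],
#         [0, 1, 3],
#         [2, 3, 0],
#         [2, 3, 1]])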
Code Example #13
File: selectors.py Project: freefeynman123/Triplets
    def get_triplets(
            self,
            embeddings: torch.Tensor,
            labels: torch.Tensor
    ):
        if self.cpu:
            embeddings = embeddings.cpu()
        if len(embeddings.shape) > 2:
            embeddings = embeddings.squeeze()
        distance_matrix = pdist(embeddings)
        distance_matrix = distance_matrix.cpu()

        labels = labels.cpu().data.numpy()
        triplets = []

        for label in set(labels):
            label_mask = (labels == label)
            label_indices = np.where(label_mask)[0]
            if len(label_indices) < 2:
                continue
            negative_indices = np.where(np.logical_not(label_mask))[0]
            anchor_positives = list(combinations(label_indices, 2))  # All anchor-positive pairs
            anchor_positives = np.array(anchor_positives)

            ap_distances = distance_matrix[anchor_positives[:, 0], anchor_positives[:, 1]]
            for anchor_positive, ap_distance in zip(anchor_positives, ap_distances):
                loss_values = ap_distance - distance_matrix[
                    torch.LongTensor(np.array([anchor_positive[0]])),
                    torch.LongTensor(negative_indices)] + self.margin
                loss_values = loss_values.data.cpu().numpy()
                hard_negative = self.negative_selection_fn(loss_values)
                if hard_negative is not None:
                    hard_negative = negative_indices[hard_negative]
                    triplets.append([anchor_positive[0], anchor_positive[1], hard_negative])

        if len(triplets) == 0:
            # Fallback: reuse the last anchor-positive pair with the first
            # negative; this assumes the loop above ran at least once.
            triplets.append([anchor_positive[0], anchor_positive[1], negative_indices[0]])

        triplets = np.array(triplets)

        return torch.LongTensor(triplets)
Code Example #14
    def __call__(
        self,
        x: np.ndarray,
        y: Optional[np.ndarray] = None
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Perform data preprocessing and return preprocessed data as tuple.
        :param x: Dataset to be preprocessed.
        :param y: Labels to be preprocessed.
        :return: Preprocessed data.
        """
        x_copy = x + 0
        original_shape = x.shape

        # ============
        if isinstance(x, np.ndarray):
            x = torch.from_numpy(x).to(device)

        if len(original_shape) == 3:
            x = x.squeeze(1)

        with torch.no_grad():
            # NOTE BUG this could throw error in torch==1.6.0
            mel = extract_melspectrogram(x.cpu())

            # FIXME: why does it always get me an extra?
            mel = mel[..., :-1].to(device)
            x_denoised = (self.denoiser(mel, x, self.severity)
                          .clamp(*self.clip_values)
                          .to("cpu")
                          .numpy())

        x_denoised = x_denoised.astype(ART_NUMPY_DTYPE)

        new_sequence_len = x_denoised.shape[-1]
        x_copy[..., :new_sequence_len] = x_denoised  # HACK
        return (x_copy, y)
Code Example #15
    def forward(
        self,
        input: torch.Tensor,
        lengths: torch.Tensor = None,
        prev_hidden_states: Tuple[torch.Tensor, torch.Tensor] = None
    ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
        '''
        Input size: (B, S)
        [input]: input traces. (B, S)
        [lengths]: lengths of traces. (B)
        [prev_hidden_states]: hidden state from the last time step, should be (h_, c_) ===Size===> ((num_layers, batch_size, lstm_hidden), (num_layers, batch_size, lstm_hidden))
        --------------
        return: output, hidden_state ===Size===> (B, S, vocab_size), ((num_layers, batch_size, lstm_hidden), (num_layers, batch_size, lstm_hidden))
        '''

        batch_size = input.size(0)

        ############ Prepare hidden state input ############
        if prev_hidden_states is not None:
            if len(prev_hidden_states) != 2:
                raise Exception(
                    "The length of the given previous hidden state is not correct, expected %d, but got %d"
                    % (2, len(prev_hidden_states)))
            expected_previous_state_size = (self.lstm.num_layers, batch_size,
                                            self.lstm.hidden_size)
            if prev_hidden_states[0].size() != expected_previous_state_size:
                raise Exception(
                    "The expected size of the previous state is %s, but the input has size %s"
                    % (str(expected_previous_state_size),
                       str(tuple(prev_hidden_states[0].size()))))

            if prev_hidden_states[1].size() != expected_previous_state_size:
                raise Exception(
                    "The expected size of the previous state is %s, but the input has size %s"
                    % (str(expected_previous_state_size),
                       str(tuple(prev_hidden_states[1].size()))))
            input_hidden_state = prev_hidden_states
        else:
            input_hidden_state = (self.h0.repeat(1, batch_size, 1),
                                  self.c0.repeat(1, batch_size, 1))

        ############ Embedding layer ############
        out = self.emb(input)  # ( B, S, F )

        ############ LSTM ############
        if lengths is not None:
            out = pack_padded_sequence(out,
                                       lengths=lengths.cpu(),
                                       batch_first=True)
            out, (h_out, c_out) = self.lstm(out,
                                            input_hidden_state)  # ( B, S, F)
            out, _ = pad_packed_sequence(out, batch_first=True)
        else:
            out, (h_out, c_out) = self.lstm(out,
                                            input_hidden_state)  # ( B, S, F)

        ############ BatchNorm and last NN ############
        out = self.batchnorm(out.transpose(2, 1)).transpose(2, 1)  # (B, S, F)
        out = F.softmax(self.output_net(out), dim=-1)  # (B, S, vocab_size)

        return out, (h_out, c_out)
Code Example #16
File: utils.py Project: VahidZee/reason-aware-raster
def write_pred_csv_data(
    writer: csv.DictWriter,
    confs_keys: list,
    coords_keys_list: list,
    timestamps: torch.Tensor,
    track_ids: torch.Tensor,
    result,
) -> None:

    coords = result["pred"]
    confs = result["conf"].cpu().detach().numpy().copy()
    assert len(coords.shape) in [3, 4]

    if len(coords.shape) == 3:
        assert confs is None  # no conf for the single-mode case
        coords = np.expand_dims(coords, 1)  # add a new axis for the multi-mode
        confs = np.ones((len(coords), 1))  # full confidence

    num_example, num_modes, future_len, num_coords = coords.shape
    assert num_coords == 2
    assert timestamps.shape == track_ids.shape == (num_example, )
    assert confs is not None and confs.shape == (num_example, num_modes)
    assert np.allclose(np.sum(confs, axis=-1), 1.0)
    assert num_modes <= MAX_MODES

    # generate always a fixed size json for MAX_MODES by padding the arrays with zeros
    coords_padded = np.zeros((num_example, MAX_MODES, future_len, num_coords),
                             dtype=coords.dtype)
    coords_padded[:, :num_modes] = coords
    confs_padded = np.zeros((num_example, MAX_MODES), dtype=confs.dtype)
    confs_padded[:, :num_modes] = confs

    for idx, gs, gv, gt, nll, loss, timestamp, track_id, coord, conf in zip(
            result["idx"].cpu().numpy().copy(),
            result["grads/semantics"].cpu().numpy().copy(),
            result["grads/vehicles"].cpu().numpy().copy(),
            result["grads/total"].cpu().numpy().copy(),
            result["nll"].cpu().detach().numpy().copy(),
            result["loss"].cpu().detach().numpy().copy(),
            timestamps.cpu().numpy().copy(),
            track_ids.cpu().numpy().copy(), coords_padded, confs_padded):
        line = {
            "idx": idx,
            "grads/semantics": gs,
            "grads/vehicles": gv,
            "grads/total": gt,
            "nll": nll,
            "loss": loss,
            "timestamp": timestamp,
            "track_id": track_id
        }
        line.update({key: con for key, con in zip(confs_keys, conf)})

        # Use a separate loop variable so the example index `idx` above
        # is not shadowed.
        for mode in range(MAX_MODES):
            line.update({
                key: f"{cor:.5f}"
                for key, cor in zip(coords_keys_list[mode],
                                    coord[mode].reshape(-1))
            })

        writer.writerow(line)