Example #1
    def asof_locs(self, where, mask: np.ndarray) -> np.ndarray:
        """
        where : array of timestamps
        mask : array of booleans where data is not NA
        """
        where_idx = where
        if isinstance(where_idx, DatetimeIndex):
            where_idx = PeriodIndex(where_idx._values, freq=self.freq)
        elif not isinstance(where_idx, PeriodIndex):
            raise TypeError(
                "asof_locs `where` must be DatetimeIndex or PeriodIndex")
        elif where_idx.freq != self.freq:
            raise raise_on_incompatible(self, where_idx)

        locs = self.asi8[mask].searchsorted(where_idx.asi8, side="right")

        locs = np.where(locs > 0, locs - 1, 0)
        result = np.arange(len(self))[mask].take(locs)

        first = mask.argmax()
        result[(locs == 0) & (where_idx.asi8 < self.asi8[first])] = -1

        return result
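A minimal standalone sketch of the same as-of lookup on plain integer arrays (hypothetical data, not the pandas internals above):

import numpy as np

values = np.array([10, 20, 30, 40])          # sorted index values (asi8 analogue)
mask = np.array([True, False, True, True])   # positions where data is not NA
where = np.array([5, 25, 35])                # query points

locs = values[mask].searchsorted(where, side="right")
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(values))[mask].take(locs)
result[(locs == 0) & (where < values[mask.argmax()])] = -1
print(result)  # [-1  0  2]: before first valid value, NA position skipped, then as-of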
Example #2
def top_k_accuracy(y_true: np.ndarray, proba_mat: np.ndarray,
                   k: int) -> float:
    """
    Computes the top K accuracy for a given set of predictions
    """

    # Account for the case when k = 1
    n = y_true.shape[0]
    if k == 1:
        # The top 1 prediction is simply the argmax of the probabilities
        y_pred = proba_mat.argmax(axis=1)

        # Compute the accuracy
        return np.sum(y_true == y_pred) / n
    else:
        p = proba_mat.shape[1]

        # Get the top k predictions
        top_k_pred = np.argsort(proba_mat, axis=1)[:, (p - k):]

        # Go through each sample and see if the given true sample
        # belongs in the top k
        in_top_k = [y_true[i] in top_k_pred[i, :] for i in range(n)]
        return np.sum(in_top_k) / n
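A quick usage sketch with made-up data (three samples, three classes):

import numpy as np

y_true = np.array([0, 2, 1])
proba = np.array([[0.6, 0.3, 0.1],
                  [0.2, 0.5, 0.3],
                  [0.1, 0.7, 0.2]])
print(top_k_accuracy(y_true, proba, k=1))  # 0.666... (rows 0 and 2 correct)
print(top_k_accuracy(y_true, proba, k=2))  # 1.0 (every true label is in the top 2)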
Example #3
    def step_simulation(self,
                        action: np.ndarray,
                        fixed_steps: int = 1) -> AtariState:
        """
        Perturb the simulator with an arbitrary action.
        :param action: array whose argmax is the action to be taken.
        :param fixed_steps: The number of consecutive times that the action will be applied. This
                            allows us to set the frequency at which the policy will play.
        :return: State representing the state of the environment after taking the desired number of
                steps.
        """
        end = False
        # _dead = False We will be deactivating this hack for now.
        for i in range(fixed_steps):
            observed, reward, _end, lives = self.env.step(action.argmax())
            end = end or _end
            # _dead = _dead or reward < 0
            self._cum_reward += reward
            if end:
                break

        if self.clone_seeds:
            microstate = self.env.unwrapped.ale.cloneSystemState()

        else:
            microstate = self.env.unwrapped.ale.cloneState()

        self.state.update_state(observed=observed,
                                reward=self._cum_reward,
                                end=end,
                                lives=lives,
                                microstate=Microstate(self.env, microstate))
        # self.state._dead = _dead
        if end:
            self.env.reset()
        return self.state
Example #4
    def get_saliency(self, state: np.ndarray,
                     q_values: np.ndarray) -> np.ndarray:
        assert state.size == self.state_size, "saliency cannot be computed during training"

        self.update_state_value_range(state)

        saliency = np.zeros_like(state)
        action: int = q_values.argmax()
        q_values_dict = {i: q / 100 for i, q in enumerate(q_values.squeeze())}

        for _ in range(self.SALIENCY_TRIALS):
            for i in range(self.state_size):
                perturbed_state = self.perturb(state, i)
                perturbed_q_values = self.get_q_values(perturbed_state)
                perturbed_q_values_dict = {
                    j: q / 100
                    for j, q in enumerate(perturbed_q_values.squeeze())
                }

                saliency[i] += computeSaliencyUsingSarfa(
                    action, q_values_dict,
                    perturbed_q_values_dict)[0] / self.SALIENCY_TRIALS

        return saliency
Example #5
def max2d(X: np.ndarray) -> Tuple[Tuple[int, int], float]:
    """Get position and value of array maximum."""
    row, col = np.unravel_index(X.argmax(), X.shape)
    return (row, col), X[row, col]
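A quick check with a toy array:

import numpy as np

X = np.array([[1.0, 5.0],
              [3.0, 2.0]])
print(max2d(X))  # ((0, 1), 5.0) (exact repr of the indices may vary by numpy version)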
Example #6
def decode_mst(energy: numpy.ndarray,
               length: int,
               has_labels: bool = True) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
    minimum spanning arborescences on graphs.

    Parameters
    ----------
    energy : ``numpy.ndarray``, required.
        A tensor with shape (num_labels, timesteps, timesteps)
        containing the energy of each edge. If has_labels is ``False``,
        the tensor should have shape (timesteps, timesteps) instead.
    length : ``int``, required.
        The length of this sequence, as the energy may have come
        from a padded batch.
    has_labels : ``bool``, optional, (default = True)
        Whether the graph has labels or not.
    """
    if has_labels and energy.ndim != 3:
        raise ConfigurationError(
            "The dimension of the energy array is not equal to 3.")
    elif not has_labels and energy.ndim != 2:
        raise ConfigurationError(
            "The dimension of the energy array is not equal to 2.")
    input_shape = energy.shape
    max_length = input_shape[-1]

    # Our energy matrix might have been batched -
    # here we clip it to contain only non padded tokens.
    if has_labels:
        energy = energy[:, :length, :length]
        # get best label for each edge.
        label_id_matrix = energy.argmax(axis=0)
        energy = energy.max(axis=0)
    else:
        energy = energy[:length, :length]
        label_id_matrix = None
    # get original score matrix
    original_score_matrix = energy
    # initialize score matrix to original score matrix
    score_matrix = numpy.array(original_score_matrix, copy=True)

    old_input = numpy.zeros([length, length], dtype=numpy.int32)
    old_output = numpy.zeros([length, length], dtype=numpy.int32)
    current_nodes = [True for _ in range(length)]
    representatives: List[Set[int]] = []

    for node1 in range(length):
        original_score_matrix[node1, node1] = 0.0
        score_matrix[node1, node1] = 0.0
        representatives.append({node1})

        for node2 in range(node1 + 1, length):
            old_input[node1, node2] = node1
            old_output[node1, node2] = node2

            old_input[node2, node1] = node2
            old_output[node2, node1] = node1

    final_edges: Dict[int, int] = {}

    # The main algorithm operates inplace.
    chu_liu_edmonds(length, score_matrix, current_nodes, final_edges,
                    old_input, old_output, representatives)

    heads = numpy.zeros([max_length], numpy.int32)
    if has_labels:
        head_type = numpy.ones([max_length], numpy.int32)
        # Set the head type of the symbolic head to be zero, arbitrarily.
        head_type[0] = 0
    else:
        head_type = None

    for child, parent in final_edges.items():
        heads[child] = parent
        if has_labels and child != 0:
            head_type[child] = label_id_matrix[parent, child]

    # Set the head of the symbolic head to be zero, arbitrarily.
    heads[0] = 0

    return heads, head_type
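A hedged usage sketch, assuming this decode_mst and its chu_liu_edmonds helper are importable (the same function ships in AllenNLP as allennlp.nn.chu_liu_edmonds.decode_mst):

import numpy

energy = numpy.random.rand(4, 6, 6)  # (num_labels, timesteps, timesteps)
heads, head_type = decode_mst(energy, length=6, has_labels=True)
print(heads)      # parent index for each token; heads[0] == 0 is the symbolic root
print(head_type)  # label id chosen for each edge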
Example #7
File: lib.py Project: StitchDeng/keras_npi
 def decode_params(env_observation: np.ndarray, arguments: IntegerArguments):
     return env_observation.argmax(axis=1), arguments.decode_all()
Example #8
def plot_confusion_matrix(y_score: np.ndarray, y_test: np.ndarray,
                          labels: List[str]):
    confm = confusion_matrix(y_test.argmax(axis=1), y_score.argmax(axis=1))
    df_cm = pd.DataFrame(confm, index=labels, columns=labels)

    return sns.heatmap(df_cm, cmap="Oranges", annot=True)
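A hypothetical usage sketch, assuming confusion_matrix (scikit-learn), pandas as pd, and seaborn as sns are imported; both inputs are score matrices, here one-hot for brevity:

import numpy as np

y_test = np.eye(3)[[0, 1, 2, 1]]   # one-hot ground truth
y_score = np.eye(3)[[0, 1, 1, 1]]  # predicted scores
ax = plot_confusion_matrix(y_score, y_test, labels=["a", "b", "c"])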
Example #9
def make_episode_visualization(img_s: np.ndarray,
                               img_q: np.ndarray,
                               gt_s: np.ndarray,
                               gt_q: np.ndarray,
                               preds: np.ndarray,
                               save_path: str,
                               mean: List[float] = [0.485, 0.456, 0.406],
                               std: List[float] = [0.229, 0.224, 0.225]):

    # 0) Preliminary checks
    assert len(img_s.shape) == 4, f"Support shape expected : K x 3 x H x W or K x H x W x 3. Currently: {img_s.shape}"
    assert len(img_q.shape) == 3, f"Query shape expected : 3 x H x W or H x W x 3. Currently: {img_q.shape}"
    assert len(preds.shape) == 4, f"Predictions shape expected : T x num_classes x H x W. Currently: {preds.shape}"
    assert len(gt_s.shape) == 3, f"Support GT shape expected : K x H x W. Currently: {gt_s.shape}"
    assert len(gt_q.shape) == 2, f"Query GT shape expected : H x W. Currently: {gt_q.shape}"
    # assert img_s.shape[-1] == img_q.shape[-1] == 3, "Images need to be in the format H x W x 3"
    if img_s.shape[1] == 3:
        img_s = np.transpose(img_s, (0, 2, 3, 1))
    if img_q.shape[0] == 3:
        img_q = np.transpose(img_q, (1, 2, 0))

    assert img_s.shape[-3:-1] == img_q.shape[-3:-1] == gt_s.shape[-2:] == gt_q.shape

    if img_s.min() <= 0:
        img_s *= std
        img_s += mean

    if img_q.min() <= 0:
        img_q *= std
        img_q += mean

    T, num_classes, H, W = preds.shape
    K = img_s.shape[0]

    # Create Grid
    n_rows = T+1
    n_columns = num_classes + 1
    fig = plt.figure(figsize=(20, 5), dpi=300.)
    grid = ImageGrid(fig, 111,
                     nrows_ncols=(n_rows, n_columns),
                     axes_pad=(0.1, 0.3),
                     direction='row',
                     )

    # 1) visualize the support and query objects with ground-truth
    start = int((num_classes+1) / 2) - int((K+1) / 2)
    for j in range(n_columns):
        ax = grid[j]
        if j == start + K:
            img = img_q
            mask = gt_q
            make_plot(ax, img, mask)
        elif j >= start and j < start + K:
            img = img_s[j - start]
            mask = gt_s[j - start]
            make_plot(ax, img, mask)
        ax.axis('off')

    # 2) Visualize the predictions evolving with time
    img = img_q
    for i in range(1, n_rows):
        for j in range(n_columns):
            ax = grid[n_columns*i + j]
            ax.axis('off')
            if j == 0:
                # Overall prediction
                mask = preds.argmax(1)[i-1]
                make_plot(ax,
                          img,
                          mask,
                          cmap_names=cmaps[:num_classes],
                          classes=range(1, num_classes))
                ax.text(-W // 3, H // 2, fr"$t = {i-1}$", rotation=90,
                        verticalalignment='center', fontsize=14)
            else:
                # Overall prediction
                mask = preds[i-1, j-1]
                make_plot(ax,
                          img,
                          mask)
    fig.tight_layout()
    fig.savefig(save_path)
    fig.clf()
Example #10
 def __one_hot_to_labels(self, y: np.ndarray) -> np.ndarray:
     return y.argmax(axis=1)
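For example, np.array([[0, 1, 0], [1, 0, 0]]).argmax(axis=1) yields array([1, 0]), recovering the integer labels from one-hot rows.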
Example #11
    def add_annotations_v2(
        self,
        img_id: int,
        probs: np.ndarray,
        min_conf: float,
        start_index=1,
        largest_group_only: bool = False,
    ) -> List[int]:
        """
        add the annotation from the given logits

        Args:
            img_id (int): the id of the image to associate the annotations
            probs (np.ndarray): an array of shape [NClasses, Height, Width] that contains a vector with probabilities
            min_conf (float): the minimum confidence used to filter the generated masks
            largest_group_only (bool, optional): if True, keep only the largest connected group per class. Defaults to False.
            start_index (int, optional): the index to start generating the coco polygons.
                Normally, 0 encodes the background. Defaults to 1.

        Raises:
            ValueError: if the shape of masks is different than 2
            ValueError: if the shape of probs is different than 3

        Returns:
            List[int]: [the idx of the annotations added]
        """

        if not isinstance(probs, np.ndarray):
            raise ValueError(
                f"the mask type should be a numpy array not a {type(probs)}")

        if len(probs.shape) != 3:
            raise ValueError("masks.shape should equal to 3")

        masks = probs.argmax(0)

        annotation_ids = []
        for cat_idx in np.unique(masks)[start_index:]:
            mask = (masks == cat_idx).astype(np.uint8)

            cat_id = int(cat_idx)
            if cat_id not in self.cats:
                raise ValueError(f"cats {cat_id} not in dataset categories")

            groups, n_groups = scipy.ndimage.label(mask)
            group_to_consider = list(range(1, n_groups + 1))
            if largest_group_only:
                values, counts = np.unique(groups, return_counts=True)
                group_to_consider = [values[np.argmax(counts[1:]) + 1]]

            # get the groups starting from label 1
            for group_idx in group_to_consider:
                group_mask = (groups == group_idx).astype(np.uint8)
                if group_mask.sum() == 0:
                    continue

                polygons = maskutils.mask_to_polygon(group_mask)
                if len(polygons) == 0:
                    continue

                bbox = maskutils.bbox_from_mask(group_mask)
                if bbox[0] is None:
                    continue

                # FIXME can have problems
                # bbox = maskutils.bbox(polygons, *masks.shape).tolist()
                # an exception is generated when the mask has less than 3 points
                area = int(maskutils.area(group_mask))
                if area == 0:
                    continue
                group_prob_mask = group_mask * probs[cat_idx]
                conf = float(np.median(group_prob_mask[group_prob_mask > 0]))
                if conf < min_conf:
                    continue

                annotation_ids.append(
                    self.add_annotation(img_id, cat_id, polygons, area, bbox,
                                        0, conf))
        return annotation_ids
Example #12
def choose_mmr(features: numpy.ndarray,
               scores: numpy.ndarray,
               n: int,
               l: float = 0.5) -> Sequence[int]:
    """Chooses n scores using maximal marginal relevance.

    Notes
    -----
    Scores are chosen from highest to lowest. If there are fewer scores to choose
    from than requested, all scores will be returned in order of preference.

    Parameters
    ----------
    features
        2D array of feature vectors, one row per item.
    scores
        1D array of scores.
    n
        Number of scores to choose.
    l
        Lambda parameter for MMR. l = 1 gives a relevance-ranked list and l = 0
        gives a maximal diversity ranking.

    Returns
    -------
    Sequence[int]
        List of indices of scores chosen.
    """
    if n < 0:
        raise ValueError('n must be a non-negative integer.')

    if n == 0:
        return []

    selections = [scores.argmax()]
    selections_set = set(selections)

    logging.debug('Running MMR.')
    dists = []
    dists_matrix = None
    while len(selections) < n:
        if n >= 10 and len(selections) % (n // 10) == 0:
            logging.debug('MMR epoch {}/{}.'.format(len(selections), n))
        # Compute distances for last selection.
        last = features[selections[-1]:selections[-1] + 1]
        last_dists = numpy.linalg.norm(features - last, axis=1)
        dists.append(last_dists)
        dists_matrix = numpy.array(dists)

        next_best = None
        next_best_margin = float('-inf')

        for i in range(len(scores)):
            if i in selections_set:
                continue

            margin = l * (scores[i] - (1 - l) * dists_matrix[:, i].max())
            if margin > next_best_margin:
                next_best_margin = margin
                next_best = i

        if next_best is None:
            break

        selections.append(next_best)
        selections_set.add(next_best)

    return selections
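A hypothetical usage sketch (assumes numpy and the logging module are imported alongside the function):

import numpy

features = numpy.random.rand(5, 8)  # one feature vector per candidate
scores = numpy.random.rand(5)       # relevance scores
chosen = choose_mmr(features, scores, n=3, l=0.5)
print(chosen)  # e.g. [4, 1, 0]: indices ordered by preference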
Example #13
def test(network: MLP, x_test: np.ndarray, y_test: np.ndarray) -> None:
    y_pred = network.forward(x_test)
    acc = (y_test.argmax(1) == y_pred.argmax(1)).sum() / len(y_pred)
    print(f"acc: {acc}")
Example #14
def get_uri_2d(ranking_1: np.ndarray, ranking_2: np.ndarray,
               right_percent_1: np.ndarray,
               right_percent_2: np.ndarray) -> Dict[str, Any]:
    """Compute splines and the value of Psi and Psi'.

    Args:
        ranking_1 (np.ndarray): Data values or ranks of the data values on list 1, a vector of
            numeric values. Large values need to be significant signals. If small
            values represent significant signals, rank the signals reversely
            (e.g. by ranking negative values) and use the rank as ranking_1.
        ranking_2 (np.ndarray): Data values or ranks of the data values on list 2, a vector of
            numeric values. Large values need to be significant signals. If small
            values represent significant signals, rank the signals reversely
            (e.g. by ranking negative values) and use the rank as ranking_2.
        right_percent_1 (np.ndarray): A numeric vector between 0 and 1 in ascending order. t is the
            right-tail percentage for ranking_1.
        right_percent_2 (np.ndarray):  A numeric vector between 0 and 1 in ascending order. t is the
            right-tail percentage for ranking_2.

    Returns:
        Dict[str, Any]: Spline and the value of Psi and Psi'.
    """
    order_df = pd.DataFrame([ranking_1, ranking_2],
                            index=["ranking_1", "ranking_2"]).T.sort_values(
                                by=["ranking_1", "ranking_2"],
                                axis=0,
                                ascending=False)
    right_percent = np.vstack([right_percent_1, right_percent_2])
    uri = np.apply_along_axis(comp_uri,
                              axis=0,
                              arr=right_percent,
                              data=order_df["ranking_2"].values)
    # compute the derivative of URI vs t using small bins
    uri_binned = uri[::4]
    tt_binned = right_percent_1[::4]
    uri_slope = (uri_binned[1:] - uri_binned[:-1]) / (tt_binned[1:] -
                                                      tt_binned[:-1])
    # smooth uri using spline
    # first find where the jump is and don't fit the jump
    # this is the index on the left
    length = min(
        sum(ranking_1 > 0) / ranking_1.size,
        sum(ranking_2 > 0) / ranking_2.size)

    if length < right_percent_1.max():
        jump_left = np.flatnonzero(right_percent_1 > length)[0] - 1
    else:
        jump_left = right_percent_1.argmax()
    if jump_left < 5:
        jump_left = right_percent_1.size
    uri_spl = interpolate.UnivariateSpline(right_percent_1[:jump_left],
                                           uri[:jump_left],
                                           k=5)
    uri_der = uri_spl(right_percent_1[:jump_left], 1)
    uri_spl_y = uri_spl(right_percent_1[:jump_left])
    return dict(uri=uri,
                uri_slope=uri_slope,
                t_binned=tt_binned[1:],
                uri_spl=dict(x=right_percent_1[:jump_left], y=uri_spl_y),
                uri_der=dict(x=right_percent_1[:jump_left], y=uri_der),
                jump_left=jump_left)
Example #15
def epsilon_greedy(q:np.ndarray, epsilon:float=0.1) -> int:
    """
    """
    z = np.random.rand()
    return np.random.choice(q.shape[0]) if z < epsilon else q.argmax()
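A quick sketch showing the exploration/exploitation split:

import numpy as np

q = np.array([0.1, 0.5, 0.2])
actions = [epsilon_greedy(q, epsilon=0.1) for _ in range(1000)]
print(np.bincount(actions, minlength=3))  # action 1 dominates (~93%); others ~3% each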
Example #16
def visualize_predictions(all_probs: np.ndarray, all_labels: np.ndarray,
                          conf_plot_file: str):

    width = 0.1
    num_bins = 10
    all_bins = [[] for x in range(num_bins)]
    all_bins_stat = [[0, 0, 0.]
                     for _ in range(num_bins)]  # cor_num, num, conf_accum

    pred_probs = all_probs.max(axis=1)
    pred_labels = all_probs.argmax(axis=1)
    # correct_probs = []
    for pred_prob, pred_label, true_label in zip(pred_probs, pred_labels,
                                                 all_labels):
        which_bins = math.floor(pred_prob / width)
        which_bins = min(9, which_bins)  # in case the pred prob is 1.0

        all_bins[which_bins].append((pred_prob, pred_label, true_label))

        all_bins_stat[which_bins][1] += 1
        all_bins_stat[which_bins][2] += pred_prob

        if pred_label == true_label:
            all_bins_stat[which_bins][0] += 1

    all_bins_acc = []
    for bin in all_bins_stat:
        if bin[1] == 0:
            all_bins_acc.append(0.)
        else:
            all_bins_acc.append(float(bin[0]) / bin[1])
    all_bins_conf = []
    for bin in all_bins_stat:
        if bin[1] == 0:
            all_bins_conf.append(0.)
        else:
            all_bins_conf.append(bin[2] / bin[1])

    all_nums = [x[1] for x in all_bins_stat]
    ECE_bin = [
        (all_nums[x] / sum(all_nums)) * abs(all_bins_acc[x] - all_bins_conf[x])
        for x in range(len(all_bins_acc))
    ]

    fracts = [x / sum(all_nums) for x in all_nums]
    acc_fracts = [x * y for x, y in zip(fracts, all_bins_acc)]

    objects = ('0.', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7', '0.8',
               '0.9')
    y_pos = np.arange(len(objects))

    plt.bar(y_pos, fracts, align='center', alpha=0.5, label="dataset fraction")
    plt.bar(y_pos,
            acc_fracts,
            align='center',
            alpha=0.5,
            label="correctly predicted")
    plt.xticks(y_pos, objects)
    plt.ylabel('percentage of dataset')
    plt.title('predictions confidence plot')
    plt.legend(loc="upper right", fontsize=12)

    plt.savefig(conf_plot_file)
    plt.show()
    plt.close()

    return sum(ECE_bin), all_bins_acc, all_bins_conf, all_nums
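The first return value is the expected calibration error (ECE) summed over the bins. A compact standalone sketch of the same computation (numpy only, hypothetical helper name):

import numpy as np

def ece_sketch(probs: np.ndarray, labels: np.ndarray, num_bins: int = 10) -> float:
    conf = probs.max(axis=1)                    # predicted confidence per sample
    correct = probs.argmax(axis=1) == labels    # correctness per sample
    bins = np.minimum((conf * num_bins).astype(int), num_bins - 1)
    ece = 0.0
    for b in range(num_bins):
        in_bin = bins == b
        if in_bin.any():
            ece += in_bin.mean() * abs(correct[in_bin].mean() - conf[in_bin].mean())
    return ece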
Example #17
File: AI.py Project: GMouYes/AI_Proj
def is_safe_move(grid: np.ndarray, direction: str):
    max_pos = np.unravel_index(grid.argmax(), grid.shape)
    new_max_pos = np.unravel_index(
        quick_merge(grid, direction).argmax(), grid.shape)
    return is_valid_move(grid, direction) and np.all(new_max_pos >= max_pos)
Example #18
 def get_alignment_matrix(
         sim_matrix: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
     m, n = sim_matrix.shape
     forward = np.eye(n)[sim_matrix.argmax(axis=1)]  # m x n
     backward = np.eye(m)[sim_matrix.argmax(axis=0)]  # n x m
     return forward, backward.transpose()
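A tiny demo, treating the snippet as a free function, on a 2 x 3 similarity matrix:

import numpy as np

sim = np.array([[0.9, 0.1, 0.2],
                [0.3, 0.2, 0.8]])
fwd, bwd = get_alignment_matrix(sim)
print(fwd)        # row-wise argmax as one-hot rows (m x n)
print(fwd * bwd)  # mutual alignments where both directions agree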
Example #19
    def add_annotations_v3(
        self,
        img_id: int,
        probs: np.ndarray,
        min_conf: float,
        single_group: bool = False,
        approx: float = 0.0,
        relative: bool = False,
    ) -> List[int]:
        """Transforms annotations from a probability mask to a coco format

        Args:
            img_id (int): [description]
            probs (np.ndarray): [description]
            min_conf (float): [description]
            start_index (int, optional): [description]. Defaults to 1.
            largest_group_only (bool, optional): [description]. Defaults to False.
            approx (float, optional): the factor used to approximate the polygons by reducint the number of points

        Returns:
            List[int]: [description]
        """
        annotation_ids = []
        global_mask = probs.argmax(0)
        # iterate over the found classes
        for class_idx in np.unique(global_mask):
            if class_idx == 0:
                continue
            # get the probability mask over the class_idx
            class_prob_mask = probs[class_idx] * (global_mask == class_idx)
            # transform the mask to polygons
            class_polygons = maskutils.mask_to_polygon(class_prob_mask,
                                                       min_score=0.5,
                                                       approx=approx,
                                                       relative=relative)

            if single_group:
                median_conf = np.median(class_prob_mask)
                if median_conf < min_conf:
                    continue

                bbox = maskutils.bbox_from_mask(class_polygons)
                if bbox[0] is None:
                    continue

                area = int(maskutils.area(class_polygons))
                annotation_ids.append(
                    self.add_annotation(img_id, class_idx, class_polygons,
                                        area, bbox, 0, median_conf))
            else:
                # for each polygon in polygons
                for poly in class_polygons:
                    poly_mask = maskutils.polygons_to_mask(
                        [poly], global_mask.shape[0], global_mask.shape[1])
                    poly_mask_prob = poly_mask * probs[class_idx]
                    prob_values = poly_mask_prob[poly_mask_prob > 0]
                    median_conf = float(np.median(prob_values))
                    if median_conf < min_conf:
                        continue

                    bbox = maskutils.bbox_from_mask(poly_mask)
                    if bbox[0] is None:
                        continue
                    area = int(maskutils.area(poly_mask))
                    annotation_ids.append(
                        self.add_annotation(img_id, int(class_idx), [poly],
                                            area, bbox, 0, median_conf))

        return annotation_ids
Example #20
 def get_alignment_matrix(self, sim_matrix: np.ndarray):
     m, n = sim_matrix.shape
     forward = np.eye(n)[sim_matrix.argmax(axis=1)]  # m x n
     backward = np.eye(m)[sim_matrix.argmax(axis=0)]  # n x m
     return forward * backward.transpose()
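Unlike Example #18, which returns the two directional alignment matrices separately, this variant multiplies them elementwise, keeping only the mutual alignments where the row-wise and column-wise argmax agree (the usual intersection heuristic for word alignment).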
Example #21
def decode_mst(energy: numpy.ndarray,
               length: int,
               has_labels: bool = True) -> Tuple[numpy.ndarray, numpy.ndarray]:
    """
    Note: Counter to typical intuition, this function decodes the _maximum_
    spanning tree.

    Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for
    maximum spanning arborescences on graphs.

    Parameters
    ----------
    energy : ``numpy.ndarray``, required.
        A tensor with shape (num_labels, timesteps, timesteps)
        containing the energy of each edge. If has_labels is ``False``,
        the tensor should have shape (timesteps, timesteps) instead.
    length : ``int``, required.
        The length of this sequence, as the energy may have come
        from a padded batch.
    has_labels : ``bool``, optional, (default = True)
        Whether the graph has labels or not.
    """
    if has_labels and energy.ndim != 3:
        raise ConfigurationError("The dimension of the energy array is not equal to 3.")
    elif not has_labels and energy.ndim != 2:
        raise ConfigurationError("The dimension of the energy array is not equal to 2.")
    input_shape = energy.shape
    max_length = input_shape[-1]

    # Our energy matrix might have been batched -
    # here we clip it to contain only non padded tokens.
    if has_labels:
        energy = energy[:, :length, :length]
        # get best label for each edge.
        label_id_matrix = energy.argmax(axis=0)
        energy = energy.max(axis=0)
    else:
        energy = energy[:length, :length]
        label_id_matrix = None
    # get original score matrix
    original_score_matrix = energy
    # initialize score matrix to original score matrix
    score_matrix = numpy.array(original_score_matrix, copy=True)

    old_input = numpy.zeros([length, length], dtype=numpy.int32)
    old_output = numpy.zeros([length, length], dtype=numpy.int32)
    current_nodes = [True for _ in range(length)]
    representatives: List[Set[int]] = []

    for node1 in range(length):
        original_score_matrix[node1, node1] = 0.0
        score_matrix[node1, node1] = 0.0
        representatives.append({node1})

        for node2 in range(node1 + 1, length):
            old_input[node1, node2] = node1
            old_output[node1, node2] = node2

            old_input[node2, node1] = node2
            old_output[node2, node1] = node1

    final_edges: Dict[int, int] = {}

    # The main algorithm operates inplace.
    chu_liu_edmonds(length, score_matrix, current_nodes,
                    final_edges, old_input, old_output, representatives)

    heads = numpy.zeros([max_length], numpy.int32)
    if has_labels:
        head_type = numpy.ones([max_length], numpy.int32)
    else:
        head_type = None

    for child, parent in final_edges.items():
        heads[child] = parent
        if has_labels:
            head_type[child] = label_id_matrix[parent, child]

    return heads, head_type
Example #22
 def _prediction_to_action_name(self, prediction: numpy.ndarray) -> str:
     prediction = prediction[0]
     return list(Configuration.get_active().action_intents.all())[
         prediction.argmax()].name
Example #23
File: utils.py Project: ksandrill/cnn
def argmax2d(arr: np.ndarray) -> tuple[int, int]:
    idx: int = arr.argmax()
    return idx // arr.shape[1], idx % arr.shape[1]
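For a 2-D array this is equivalent to np.unravel_index(arr.argmax(), arr.shape), as used in Examples #5 and #17.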
Example #24
 def forward(self, x: np.ndarray):
     """Encode input to sparse matrix."""
     self.inputs.append(x)
     clss = x.argmax()
     return clss
Example #25
 def label_tranformer(self, batch_label: np.ndarray):
     # label 0 stands for 'Good', label 1 stands for 'Bad'
     if self.categories_num == 3:
         return batch_label
     new_label = (batch_label.argmax(axis=1) != 0).astype('int32')
     return self.temp_array[new_label]
Example #26
def draw_segmentation(
    img: Union[np.ndarray, Image.Image],
    probs: np.ndarray,
    idx_name_dict: Dict[int, str],
    min_conf: float,
    colors: List = None,
    title: str = "",
    ax: plt.Axes = None,
    figsize: Tuple[int, int] = (16, 8),
    alpha: float = 0.3,
    fill: bool = True,
    min_score: float = 0.5,
):
    """draw the result from a segmentation model

    Args:
        img (Union[np.ndarray, Image.Image]): a PIL image or a numpy array
        probs (np.ndarray): it accepts:
            - the logits coming from the model with shape (n_classes, H, W), or
            - the mask coming from true annotations with shape (H,W) and containing pixel classification
        idx_name_dict (Dict[int, str]):
        min_conf (float): the min confidence of the mask given as output
        colors (List, optional): the colors to display categories. Defaults to None.
        title (str, optional): [description]. Defaults to ''.
        ax (plt.Axes, optional): [description]. Defaults to None.
        figsize (Tuple[int, int], optional): [description]. Defaults to (16, 8).

    Returns:
        [plt.Axes]: the ax of the given plot
    """
    if colors is None:
        colors = generate_colormap(len(idx_name_dict) + 1)

    if ax is None:
        _, ax = plt.subplots(figsize=figsize)

    if isinstance(img, np.ndarray):
        img = Image.fromarray(img)

    width, height = img.size
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis("off")
    ax.set_title(title)

    out_image = np.array(img).astype(np.uint8)

    if len(probs.shape) == 3:
        cats_mask = probs.argmax(0)
    else:
        cats_mask = probs
        probs = None

    for cat_idx in np.unique(cats_mask):
        if cat_idx == 0:
            continue

        if probs is not None:
            bool_mask = probs[cat_idx] >= min_conf
            conf_mask = probs[cat_idx][bool_mask]

        else:
            bool_mask = cats_mask == cat_idx
            conf_mask = np.array([1])

        conf = np.round(np.nan_to_num(conf_mask.flatten().mean()), 2)
        if conf < min_conf:
            continue

        name = f"{idx_name_dict[cat_idx]} {int(conf * 100)}%"
        color = colors[cat_idx]

        # draw text in the center (defined by median) when box is not drawn
        # median is less sensitive to outliers.
        text_pos = np.median(bool_mask.nonzero(), axis=1)[::-1] - 20
        lighter_color = change_color_brightness(color, brightness_factor=0.7)
        font_size = 10
        draw_text(ax, name, text_pos, font_size, horizontal_alignment="left")

        padded_mask = np.zeros(
            (bool_mask.shape[0] + 2, bool_mask.shape[1] + 2), dtype=np.uint8)
        padded_mask[1:-1, 1:-1] = bool_mask
        contours = measure.find_contours(padded_mask, min_score)
        for verts in contours:
            verts = np.fliplr(verts) - 1
            p = Polygon(
                verts,
                facecolor=color,
                edgecolor=lighter_color,  # 'black',
                fill=fill,
                alpha=alpha,
            )
            ax.add_patch(p)
    ax.imshow(out_image)
    return ax
Example #27
 def decode_params(env_observation: np.ndarray,
                   arguments: IntegerArguments):
     return env_observation.argmax(axis=1), arguments.decode_all()
Example #28
def check(output: np.ndarray, label: np.ndarray) -> bool:
    return output.argmax() == label.argmax()
Example #29
 def build(cls, prediction: ndarray):
     label = prediction.argmax(axis=-1).flat[0]
     probability = prediction.flat[label]
     # TODO: clean up this logic
     label = None if label == 83 else label
     return cls(label, probability * 100)
Example #30
def evaluate_ss_single(yt: np.ndarray,
                       yp: np.ndarray,
                       threshold: float = 0.5,
                       multilabel: bool = False) -> tuple:
    """1件分の評価のための処理。

    Args:
        yt: 1件のラベル (shape=(H, W) or (H, W, C))
        yp: 1件の推論結果 (shape=(H, W) or (H, W, C))
        threshold: 閾値 (ラベルと推論結果と両方に適用)
        multilabel: マルチラベルならTrue、多クラスならFalse。

    Returns:
        評価の途中結果

    """
    with np.errstate(all="warn"):
        if np.ndim(yt) == 2:
            yt = np.expand_dims(yt, axis=-1)
        if np.ndim(yp) == 2:
            yp = np.expand_dims(yp, axis=-1)
        assert np.ndim(yt) == 3  # (H, W, C)
        assert np.ndim(yp) == 3  # (H, W, C)

        # If the sizes don't match, warn and resize anyway
        # (not recommended: the result depends on the resize algorithm)
        if yt.shape[:2] != yp.shape[:2]:
            warnings.warn("Predictions need resize.")
            yp = tk.ndimage.resize(yp, width=yt.shape[1], height=yt.shape[0])

        assert yt.shape == yp.shape

        if multilabel or yt.shape[-1] == 1:
            # For multilabel or binary classification, compare against the threshold
            p_true = yt >= threshold
            p_pred = yp >= threshold
        else:
            # For multiclass classification, argmax then one-hot encode along the channel axis
            p_true = np.zeros(yt.shape, dtype=bool)
            np.put_along_axis(p_true, yt.argmax(axis=-1)[..., None], True, axis=-1)
            p_pred = np.zeros(yp.shape, dtype=bool)
            np.put_along_axis(p_pred, yp.argmax(axis=-1)[..., None], True, axis=-1)
        n_true = ~p_true
        n_pred = ~p_pred
        tp = np.sum(p_true & p_pred, axis=(0, 1))  # (C,)
        fp = np.sum(n_true & p_pred, axis=(0, 1))  # (C,)
        tn = np.sum(n_true & n_pred, axis=(0, 1))  # (C,)
        fn = np.sum(p_true & n_pred, axis=(0, 1))  # (C,)
        gp = np.sum(p_true, axis=(0, 1))  # (C,)
        pp = np.sum(p_pred, axis=(0, 1))  # (C,)
        if yt.shape[-1] == 1:
            # class 0 = bg, class 1 = fg (note the order is flipped)
            cm = np.array([
                # negative,  positive
                [np.sum(tn), np.sum(fp)],  # gt negative
                [np.sum(fn), np.sum(tp)],  # gt positive
            ])
        else:
            assert yt.shape[-1] >= 2
            num_classes = yt.shape[-1]
            cm = np.zeros((num_classes, num_classes), dtype=np.int64)
            yt_c = yt.argmax(axis=-1)
            yp_c = yp.argmax(axis=-1)
            for i in range(num_classes):
                for j in range(num_classes):
                    cm[i, j] = np.sum((yt_c == i) & (yp_c == j))

        return tp, fp, tn, fn, gp, pp, cm
Example #31
def accuracy(pred: np.ndarray, labels: np.ndarray):
    pred = pred.argmax(axis=1)
    return (pred == labels).mean()
Example #32
 def decode(self, x: np.ndarray, calc_argmax=True) -> str:
     if calc_argmax:
         x = x.argmax(axis=-1)
     return ''.join(self.indices_char[i] for i in x)