Example #1
 def predict_with_keras_model(model_weights: List,
                              x: np.ndarray) -> np.ndarray:
     try:
         pred = softmax(np.dot(x, model_weights[0]) + model_weights[1])
     except IndexError:
         # No bias term provided; fall back to the weight matrix alone.
         pred = softmax(np.dot(x, model_weights[0]))
     return np.argmax(pred, axis=1)
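For context, a minimal usage sketch (all names, shapes, and weights below are hypothetical), assuming model_weights is the [W, b] list returned by a Keras model's get_weights() for a single Dense layer:

import numpy as np

rng = np.random.default_rng(0)
W = rng.normal(size=(10, 3))        # hypothetical kernel of a Dense(3) layer
b = np.zeros(3)                     # hypothetical bias vector
x = rng.normal(size=(5, 10))        # batch of 5 feature vectors
labels = predict_with_keras_model([W, b], x)   # -> array of 5 class indices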
Example #2
def correct_preds_for_insides(preds, spans_coords, logits, insides, mapping,
                              inverse_mapping):
    # For every span strictly nested inside another span, re-pick labels until
    # the inner label is an allowed "inside" of the outer label: zero out the
    # current label of whichever span is predicted with lower confidence and
    # take its next-best class; if that span's top probability dominates its
    # runner-up (ratio > 1.4), restore both original labels and stop instead.
    for i in range(len(preds)):
        for j in range(len(preds)):
            if spans_coords[j][0] >= spans_coords[i][0] and spans_coords[j][
                    1] <= spans_coords[i][1]:
                if spans_coords[j][0] != spans_coords[i][0] or spans_coords[j][
                        1] != spans_coords[i][1]:
                    def_i = preds[i]
                    def_j = preds[j]
                    log = softmax([logits[i]])[0]
                    login = softmax([logits[j]])[0]
                    def_prob_i = log[inverse_mapping[preds[i]]]
                    def_prob_j = login[inverse_mapping[preds[j]]]
                    while preds[j] not in insides.get(preds[i], []):
                        if log[inverse_mapping[preds[i]]] > login[
                                inverse_mapping[preds[j]]]:
                            values = np.sort(login)[-2:]
                            if values[1] / (values[0] + 1e-6) > 1.4:
                                preds[i] = def_i
                                preds[j] = def_j
                                break
                            login[inverse_mapping[preds[j]]] = 0
                            preds[j] = mapping[np.argmax(login)]
                        else:
                            values = np.sort(log)[-2:]
                            if values[1] / (values[0] + 1e-6) > 1.4:
                                preds[i] = def_i
                                preds[j] = def_j
                                break
                            log[inverse_mapping[preds[i]]] = 0
                            preds[i] = mapping[np.argmax(log)]
    return preds
Example #3
def ensemble(scores1, scores2):
    ss1 = scores1.sum(axis=0).reshape([1, -1])
    ss2 = scores2.sum(axis=0).reshape([1, -1])
    ss1 = softmax(ss1)
    ss2 = softmax(ss2)
    final_scores = ss1 + ss2
    final_pred = np.argmax(final_scores)
    return final_pred
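A small hypothetical call for reference; the shapes are guesses based on the axis-0 sum, i.e. each argument holds per-segment scores for one sample and the function returns a single ensembled class index:

import numpy as np

scores_a = np.random.randn(10, 5)     # hypothetical scores: 10 segments, 5 classes
scores_b = np.random.randn(10, 5)
pred = ensemble(scores_a, scores_b)   # integer class index in [0, 4]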
Example #4
def rpotts(X, model):
    features, A = X

    n_classes = len(model.classes_)
    n_sites = features.shape[0]
    
    n_neighbors = A.sum(axis=1).reshape(-1, 1)

    R = np.random.uniform(size=(n_sites, 1))

    lower = np.empty((n_sites, 1))
    upper = np.empty((n_sites, 1))

    betas = model.coef_[:, :-1]
    eta = model.coef_[:, -1:]

    p = safe_sparse_dot(features, betas.T, dense_output=True)
    p += model.intercept_

    p_nonspatial = np.hstack((p, np.zeros((features.shape[0], 1))))
    p_nonspatial -= logsumexp(p_nonspatial, axis=1)[:, np.newaxis]
    p_nonspatial = np.exp(p_nonspatial, p_nonspatial)

    _target = model.lbin.transform

    i = 0
    while not np.array_equal(upper, lower):
        R = np.hstack((np.random.uniform(size=R.shape), R))

        print(upper.sum(), lower.sum())
        lower[:] = 0
        upper[:] = n_classes - 1

        for r in R.T:
            r = r.reshape(-1, 1)

            upper_multi = _target(upper)
            upper_spatial = safe_sparse_dot(A, (upper_multi - p_nonspatial))[:, :-1]/n_neighbors
            upper_spatial[np.isnan(upper_spatial)] = 0

            upper_p = p + (eta.T * np.array(upper_spatial))
            upper_p = softmax(np.hstack((upper_p, np.zeros((features.shape[0], 1)))))
            upper_p = upper_p.cumsum(axis=1)
            
            lower_multi = _target(lower)
            lower_spatial = safe_sparse_dot(A, (lower_multi - p_nonspatial))[:, :-1]/n_neighbors
            lower_spatial[np.isnan(lower_spatial)] = 0

            lower_p = p + (eta.T * np.array(lower_spatial))
            lower_p = softmax(np.hstack((lower_p, np.zeros((features.shape[0], 1)))))
            lower_p = lower_p.cumsum(axis=1)

            upper = (upper_p > r).argmax(axis = 1)
            lower = (lower_p > r).argmax(axis = 1)

    return lower.reshape(-1, 1)
Example #5
    def get_sensitivity_map(self, model, image, class_index, patch_size, color, use_softmax=False, use_old_confidence=False):
        """
        Compute sensitivity map on a given image for a specific class index.

        Args:
            model (tf.keras.Model): tf.keras model to inspect
            image (np.ndarray): Input image to occlude
            class_index (int): Index of targeted class
            patch_size (int): Size of patch to apply on the image
            color: Fill value for the occluding patch
            use_softmax (bool): Whether to apply a softmax to the model predictions
            use_old_confidence (bool): If True, compute the map as
                original_confidence - prediction; otherwise as 1 - prediction

        Returns:
            np.ndarray: Sensitivity map resized to the spatial size of the input image
        """
        sensitivity_map = np.zeros(
            (
                math.ceil(image.shape[0] / patch_size),
                math.ceil(image.shape[1] / patch_size),
            )
        )

        patches = [
            custom_apply_grey_patch(image, top_left_x, top_left_y, patch_size, color=color)
            for index_x, top_left_x in enumerate(range(0, image.shape[0], patch_size))
            for index_y, top_left_y in enumerate(range(0, image.shape[1], patch_size))
        ]

        coordinates = [
            (index_y, index_x)
            for index_x in range(
                sensitivity_map.shape[1]
            )
            for index_y in range(
                sensitivity_map.shape[0]
            )
        ]

        pred = model.predict(np.expand_dims(image, axis=0))
        if use_softmax:
            pred = softmax(pred)
        original_pred = np.squeeze(pred)[class_index]
        for (index_y, index_x), patch in zip(
            coordinates, patches
        ):
            prediction = model.predict(np.expand_dims(patch, axis=0))
            if use_softmax:
                confidence = np.squeeze(softmax(prediction))[class_index]
            else:
                confidence = np.squeeze(prediction)[class_index]
            if use_old_confidence:
                sensitivity_map[index_y, index_x] = original_pred - confidence
            else:
                sensitivity_map[index_y, index_x] = 1 - confidence

        return cv2.resize(sensitivity_map, image.shape[0:2])
Example #6
    def finalize_metrics(self, ks=(1, 2)):
        """
        Calculate and log the final ensembled metrics.
        ks (tuple): list of top-k values for topk_accuracies. For example,
            ks = (1, 5) corresponds to top-1 and top-5 accuracy.
        """
        if self.isDemo:
            preds_numpy = self.video_preds.clone()
            normalize = np.array(softmax(preds_numpy.cpu().numpy()))
            jogging_label = 21
            sort_p = []
            for p in normalize:
                sort_p.append(sorted(p, reverse=True))

            probability = np.transpose(
                np.array(softmax(preds_numpy.cpu().numpy())))

            for i, v in enumerate(probability[jogging_label]):
                top1_v = sort_p[i][0]
                top2_v = sort_p[i][1]
                if v == top1_v or v == top2_v:
                    probability[jogging_label][i] = (
                        probability[jogging_label][i] / (top1_v + top2_v))

            cwd = os.getcwd()
            tmp_dir = os.path.join(cwd, "tmp")
            if not os.path.exists(tmp_dir):
                os.mkdir(tmp_dir)
            out_dir = os.path.join(tmp_dir, "probability.npy")

            np.save(out_dir, probability[jogging_label])
        if not all(self.clip_count == self.num_clips):
            logger.warning("clip count {} ~= num clips {}".format(
                self.clip_count, self.num_clips))
            logger.warning(self.clip_count)

        num_topks_correct = metrics.topks_correct(self.video_preds,
                                                  self.video_labels, ks)
        topks = [(x / self.video_preds.size(0)) * 100.0
                 for x in num_topks_correct]
        #binary = [
        #    (x / self.video_preds.size(0)) * 100.0 for x in binary_correct
        #]
        assert len({len(ks), len(topks)}) == 1
        stats = {"split": "test_final"}

        for k, topk in zip(ks, topks):
            stats["top{}_acc".format(k)] = "{:.{prec}f}".format(topk, prec=2)
Example #7
 def classifier_predict_proba(X, estimator, skip_attr_set_idxs=[]):
     log_odds_vector = EBMUtils.decision_function(
         X, estimator.attribute_sets_, estimator.attribute_set_models_,
         estimator.intercept_, skip_attr_set_idxs)
     log_odds_trans = np.c_[-log_odds_vector, log_odds_vector]
     scores = softmax(log_odds_trans, copy=True)
     return scores
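The np.c_[-log_odds_vector, log_odds_vector] construction above turns a single log-odds column into a two-column score matrix whose row-wise softmax reproduces the logistic sigmoid of twice the log-odds. A quick check of that identity, assuming scipy's expit and a row-wise softmax:

import numpy as np
from scipy.special import expit, softmax

z = np.random.randn(6)                  # hypothetical log-odds values
two_col = np.c_[-z, z]                  # same construction as above
print(np.allclose(softmax(two_col, axis=1)[:, 1], expit(2 * z)))   # expected: True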
Example #8
    def predict_proba(self, X):
        """Predict proba using softmax.

        Parameters
        ----------
        X : ndarray, shape (n_matrices, n_channels, n_channels)
            Set of SPD matrices.

        Returns
        -------
        prob : ndarray, shape (n_matrices, n_classes)
            Probabilities for each class.
        """
        n_matrices, _, _ = X.shape

        dist = self._predict_distances(X)
        idx = np.argsort(dist)
        dist_sorted = np.take_along_axis(dist, idx, axis=1)
        neighbors_classes = self.classmeans_[idx]
        probas = softmax(-dist_sorted[:, 0:self.n_neighbors]**2)

        prob = np.zeros((n_matrices, len(self.classes_)))
        for m in range(n_matrices):
            for il, ll in enumerate(self.classes_):
                prob[m, il] = np.sum(
                    probas[m, neighbors_classes[m, 0:self.n_neighbors] == ll])

        return prob
Example #9
def eval_fn(data_loader, model, device):
    model.eval()
    losses = AverageMeter()
    tk0 = tqdm(data_loader, total=len(data_loader))
    yt, yp = [], []

    for bi, d in enumerate(tk0):
        ids = d['ids']
        mask = d['mask']
        label = d['label']

        ids = ids.to(device, dtype=torch.long)
        label = label.to(device, dtype=torch.long)
        mask = mask.to(device, dtype=torch.long)

        with torch.no_grad():
            k = model(input_ids=ids, attention_mask=mask, labels=label)
            loss = k['loss']
            logits = k['logits']
            # loss, logits = model(input_ids=ids, attention_mask=mask, labels=label)

        logits = logits.detach().cpu().numpy()

        preds = softmax(logits)
        pred_labels = np.argmax(preds, axis=1).flatten()
        ground_labels = label.to('cpu').numpy()
        # print("predict label:", pred_labels.tolist(), ";actual label:", ground_labels.tolist())
        yt = yt + ground_labels.tolist()
        yp = yp + pred_labels.tolist()

        losses.update(loss.item(), ids.size(0))
        tk0.set_postfix(loss=losses.avg)

    return f1_score(yt, yp)
Example #10
def read_prediction_logit_file(path):

    prediction = read_tsv_file(path)
    prediction = softmax([[float(i[0]), float(i[1])] for i in prediction])
    prediction = [i[0] for i in prediction]

    return prediction
Example #11
def injectFaultSoftmax(a):
	"Function to call injectFault on Softmax"
	logging.debug("Calling Operator Softmax " + getArgs(a))
	res = softmax(a)		# Use the implementation from the sklearn library
	res = condPerturb(Ops.SOFTMAX, res)
	if logReturn: logging.debug("\tReturning from Softmax " + str(res) )
	return res
Example #12
    def predict_proba(self, X_INT_ID):
        # Check is fit had been called
        check_is_fitted(self, ['X_', 'y_'])

        # Input validation
        # X = check_array(X)

        n2v_id_col = self.n2v_manager.label_id_colname
        n2v_label_col = self.n2v_manager.label_colname

        X_ = self.feat_df_.loc[X_INT_ID, :]
        if n2v_id_col in X_:
            X_.drop(columns=n2v_id_col, inplace=True)
        if n2v_label_col in X_:
            X_.drop(columns=n2v_label_col, inplace=True)

        if hasattr(self.classifier, "predict_proba"):
            y_ = self.classifier.predict_proba(X_)
            return y_
        else:
            decision = self.decision_function(X_)
            if decision.ndim == 1:
                # Workaround for multi_class="multinomial" and binary outcomes
                # which requires softmax prediction with only a 1D decision.
                decision_2d = np.c_[-decision, decision]
            else:
                decision_2d = decision
            return softmax(decision_2d, copy=False)
Example #13
def eval_fitness_simulate(env, part, Nx0=5, Kmax=1000, Nmc=1, gamma=0.95, show=False):
    obs_space = env.observation_space.shape[0]
    action_space = env.action_space.n 
    
    Nx0_rewards = []
    for i_Nx0 in range(Nx0):
        # observation = env.reset()
        # save_point_env = np.copy.deepcopy(env)
        Nmc_rewards = []
        for i_Nmc in range(Nmc):
            # env = save_point_env
            observation = env.reset()
            t_rewards = []
            for t in range(Kmax):
                if show:
                    env.render()
                part_matrix_w = np.reshape(part[:-action_space],(action_space,obs_space))
                part_matrix_b = part[-action_space:]
                prob_weights = softmax([part_matrix_w.dot(observation)+part_matrix_b]).ravel()
                # decide action
                action = np.random.choice(range(len(prob_weights)), p=prob_weights)
                observation, reward, done, info = env.step(action)
                t_rewards.append(reward)
                if done:
                    Nmc_rewards.append( discount_and_normalize_rewards( t_rewards, gamma, norm=False )[0] )
                    break
        Nx0_rewards.append( np.mean(Nmc_rewards) )
    return (np.mean(Nx0_rewards),)
Example #14
    def generate_explanation(self,
                             stacked_frames,
                             model,
                             radius,
                             raw_diff=False,
                             neuron_selection=False):
        """
        Generates an explanation of the prediction of a CNN.

        Args:
            stacked_frames: input of which the prediction will be explained
            model: the model which should be explained
            radius: the radius of the blur
            raw_diff: use the raw difference of the confidence values or the Euclidean distance
            neuron_selection: index of the output neuron to explain; False to use the argmax prediction

        Returns:
            scores: The saliency map which functions as explanation
        """
        # d: density of scores (if d==1, then get a score for every pixel...
        #    if d==2 then every other, which is 25% of total pixels for a 2D image)
        d = radius
        # r: radius of blur
        r = radius

        my_input = np.expand_dims(stacked_frames, axis=0)
        original_output = model.predict(my_input)

        x = stacked_frames.shape[0]
        y = stacked_frames.shape[1]

        scores = np.zeros((int((x - 1) / d) + 1, int(
            (y - 1) / d) + 1))  # saliency scores S(t,i,j)

        for i in range(0, x, d):
            for j in range(0, y, d):
                mask = self.get_mask(center=[i, j], size=[x, y], r=r)
                stacked_mask = np.zeros(shape=stacked_frames.shape)
                for idx in range(stacked_frames.shape[2]):
                    stacked_mask[:, :, idx] = mask

                masked_input = np.expand_dims(greydanus_explainer.occlude(
                    stacked_frames, stacked_mask),
                                              axis=0)
                masked_output = model.predict(masked_input)
                if raw_diff:
                    if neuron_selection is not False:
                        action_index = neuron_selection
                    else:
                        action_index = np.argmax(original_output)
                    scores[int(i / d), int(j / d)] = 1 - np.squeeze(
                        softmax(masked_output))[action_index]
                else:
                    scores[int(i / d), int(j / d)] = (
                        pow(original_output - masked_output, 2).sum() * 0.5)

        pmax = scores.max()
        # Resize back to the input resolution and convert the PIL image back
        # to a NumPy array before rescaling.
        scores = np.array(
            Image.fromarray(scores).resize(size=[x, y],
                                           resample=Image.BILINEAR))
        scores = pmax * scores / scores.max()
        return scores
Example #15
 def calcDerivative(self):
     input = self.getInput(postForward=True)
     sx = softmax(input)
     s = sx.reshape(-1, 1)
     partial = np.diagflat(s) - np.dot(s, s.T)
     dydx = np.dot(self.backInput, partial)
     return dydx
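The partial term above is the standard softmax Jacobian, J = diag(s) - s s^T. A quick numerical sanity check of that formula against central finite differences, assuming scipy.special.softmax:

import numpy as np
from scipy.special import softmax

x = np.random.randn(4)                  # hypothetical pre-softmax input
s = softmax(x).reshape(-1, 1)
jac = np.diagflat(s) - np.dot(s, s.T)   # analytic Jacobian, as in calcDerivative

eps = 1e-6
num = np.zeros((4, 4))
for k in range(4):
    d = np.zeros(4)
    d[k] = eps
    num[:, k] = (softmax(x + d) - softmax(x - d)) / (2 * eps)
print(np.allclose(jac, num, atol=1e-6))   # expected: True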
Example #16
def to_softmax(logits):
  num_classes = logits.shape[1]
  if num_classes == 1:
    scores = expit(logits)
  else:
    scores = softmax(logits)
  return scores
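A tiny illustration of the two branches, assuming the snippet's expit and softmax come from scipy.special:

import numpy as np
from scipy.special import expit, softmax

binary_logits = np.array([[0.3], [-1.2]])      # single column -> sigmoid branch
multi_logits = np.array([[0.3, 1.0, -0.5]])    # several columns -> softmax branch
print(to_softmax(binary_logits))               # roughly [[0.574], [0.231]]
print(to_softmax(multi_logits))                # one row summing to 1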
Example #17
    def act(self, round, prev_state, prev_action, reward, new_state, too_slow):
        if round > self.game_duration - self.score_scope:
            # cancel exploration during "money-time"
            self.use_softmax_sampling = False
            self.epsilon = 0

        new_state_repr = self.state_proc.get_state_repr(new_state)

        if prev_state is not None:
            prev_state_repr = self.state_proc.get_state_repr(prev_state)
            self.memory.add(prev_state_repr,
                            bp.Policy.ACTIONS.index(prev_action), reward,
                            new_state_repr, 0)

        if self.use_softmax_sampling:
            return np.random.choice(
                bp.Policy.ACTIONS,
                p=softmax(
                    self.model.predict(new_state_repr[np.newaxis]) /
                    self.epsilon).squeeze())
        else:  # use epsilon-greedy
            if np.random.rand() < self.epsilon:
                return np.random.choice(bp.Policy.ACTIONS)
            else:
                prediction = self.model.predict(new_state_repr[np.newaxis])[0]
                action = bp.Policy.ACTIONS[np.argmax(prediction)]
                return action
Example #18
 def decode_output(self, logits, doc_blocks):
     outputs = []
     for jdx, preds in enumerate(logits):
         output = {}
         blocks = doc_blocks[jdx]
         for idx, label in enumerate(self.label_order):
             if label in ['author', 'date', 'breadcrumbs']:
                 top_k = 10
                 scores = softmax([preds[:, idx]])[0]
                 ind = np.argpartition(preds[:, idx], -top_k)[-top_k:]
                 result = [(fix_encoding(str_cast(blocks[idx].text)),
                            scores[idx]) for idx in ind
                           if scores[idx] > self.cls_threshold]
                 # sort values by confidence
                 output[label] = sorted(result,
                                        key=lambda x: x[1],
                                        reverse=True)
             else:
                 mask = expit(preds[:, idx]) > self.binary_threshold
                 ctx = fix_encoding(
                     str_cast(b'\n'.join([b.text for b in blocks[mask]])))
                 if len(ctx) == 0:
                     ctx = None
                 output[label] = ctx
         outputs.append(output)
     return outputs
Example #19
    def analyze(path):
        from keras.models import load_model
        start = datetime.now()
        tok = Tokenizer(num_words=MAX_WORDS)
        vocab = pd.read_csv(
            os.path.realpath(__file__)[:-11] + '/data/java/vocab.csv')
        tok.fit_on_texts(vocab.input)
        root = os.path.realpath(__file__)[:-11] + "/data/java/checkpoints/"

        h5_ls, vuln_models = os.listdir(root), {}

        for h5 in h5_ls:
            progress = str(h5_ls.index(h5) + 1) + "/" + str(len(h5_ls))
            print("\x1b[33m(" + progress + ") - Loading " + h5[:-3] +
                  "...\x1b[m")
            vuln_models[h5[:-3]] = load_model(root + h5)

        jfile = JavaClass(path)
        for method in jfile.methods:
            print("\n\x1b[33mEvaluating " + method.name + "()...\x1b[m")
            metrics, i = [], 0
            for vuln_model in vuln_models:
                pred = float(vuln_models[vuln_model].predict(
                    Javalect._embed(tok, str(method.tokens())))[0][0])
                metrics.append(pred)
            soft_metrics = list(softmax(np.asarray([metrics]))[0])
            print("  p-risk     p-dist     vulnerability")
            for vuln_model in vuln_models:
                print("  " + _fmt(metrics, i) + "     " +
                      _fmt(soft_metrics, i) + "     " + vuln_model)
                i += 1

        print("\n\x1b[33mAnalyzed " + str(len(jfile.methods)) +
              " methods against " + str(len(h5_ls)) + " vulnerabilities in " +
              str(datetime.now() - start) + "\x1b[m.")
Example #20
def generate_action(env, prev_action, observation, w, b):
    #    index = np.random.randn(env.action_space.n)
    #    index[2] = np.abs(index[2])     # Favor main engine over the others:
    #    return np.argmax(index)
    prob_weights = softmax([w.dot(observation) + b]).ravel()
    action = np.random.choice(range(len(prob_weights)), p=prob_weights)
    return action
Example #21
    def predict_proba(self, X):
        """Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes].
            The class probabilities of the input samples.
        """

        if self.n_classes_ <= 2:
            proba = self.estimator.predict_proba(X)
            proba = np.c_[1 - proba, proba]
        else:
            proba = np.zeros((X.shape[0], self.n_classes_))
            for i, clf in enumerate(self.estimators):
                class_proba = clf.predict_proba(X)
                proba[:, i] = class_proba

            # Honestly, I don't know which is better for computing the
            # probability: softmax or a normalized sigmoid.
            if self.calc_prob == "Sigmoid":
                proba = sigmoid(proba)
                normalizer = proba.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba /= normalizer
            else:
                proba = softmax(proba)
        return proba
Example #22
def test_calibration_multiclass(method, ensemble, seed):
    def multiclass_brier(y_true, proba_pred, n_classes):
        Y_onehot = np.eye(n_classes)[y_true]
        return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]

    # Test calibration for multiclass with classifier that implements
    # only decision function.
    clf = LinearSVC(random_state=7)
    X, y = make_blobs(
        n_samples=500, n_features=100, random_state=seed, centers=10, cluster_std=15.0
    )

    # Use an unbalanced dataset by collapsing 8 clusters into one class
    # to make the naive calibration based on a softmax more unlikely
    # to work.
    y[y > 2] = 2
    n_classes = np.unique(y).shape[0]
    X_train, y_train = X[::2], y[::2]
    X_test, y_test = X[1::2], y[1::2]

    clf.fit(X_train, y_train)

    cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble)
    cal_clf.fit(X_train, y_train)
    probas = cal_clf.predict_proba(X_test)
    # Check probabilities sum to 1
    assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))

    # Check that the dataset is not too trivial, otherwise it's hard
    # to get interesting calibration data during the internal
    # cross-validation loop.
    assert 0.65 < clf.score(X_test, y_test) < 0.95

    # Check that the accuracy of the calibrated model is never degraded
    # too much compared to the original classifier.
    assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)

    # Check that Brier loss of calibrated classifier is smaller than
    # loss obtained by naively turning OvR decision function to
    # probabilities via a softmax
    uncalibrated_brier = multiclass_brier(
        y_test, softmax(clf.decision_function(X_test)), n_classes=n_classes
    )
    calibrated_brier = multiclass_brier(y_test, probas, n_classes=n_classes)

    assert calibrated_brier < 1.1 * uncalibrated_brier

    # Test that calibration of a multiclass classifier decreases log-loss
    # for RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=30, random_state=42)
    clf.fit(X_train, y_train)
    clf_probs = clf.predict_proba(X_test)
    uncalibrated_brier = multiclass_brier(y_test, clf_probs, n_classes=n_classes)

    cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble)
    cal_clf.fit(X_train, y_train)
    cal_clf_probs = cal_clf.predict_proba(X_test)
    calibrated_brier = multiclass_brier(y_test, cal_clf_probs, n_classes=n_classes)
    assert calibrated_brier < 1.1 * uncalibrated_brier
Example #23
 def get_batch_scores(bx, bt):
     with torch.no_grad():
         bx = nn.utils.rnn.pad_sequence(bx, batch_first=True, padding_value=0).to(self.device)
         bt = nn.utils.rnn.pad_sequence(bt, batch_first=True, padding_value=1).to(self.device)
         outputs = self.model(bx, bt, (bx != 0))
         logits = outputs[0].to('cpu').numpy()
         b_scores = softmax(logits)
         scores.extend(b_scores[:, 1].tolist())
Example #24
 def get_proba(self, z):
     mus = self.get_mus()
     z = np.array(z).reshape(-1, 1)
     if self.prior is None:
         log_prior = 0
     else:
         log_prior = self.log_prior
     return softmax(log_prior - np.abs(mus - z))
Example #25
 def predict_proba(self, X):
     check_is_fitted(self)
     decision = self.decision_function(X)
     if self.classes_.size == 2:
         proba = expit(decision)
         return np.vstack([1-proba, proba]).T
     else:
         return softmax(decision)
Example #26
def tpr(outputs, labels):
    outputs = np.round(softmax(np.array([outputs])))
    labels = np.array([labels])

    outputs = list(outputs[0])
    labels = list(labels[0])
    fpr, tpr, thresholds = metrics.roc_curve(labels, outputs)
    tpr = np.nanmean(tpr)
    return tpr
Example #27
 def _compute_stds_and_averages(self, X, weights):
     z = np.dot(X, weights[0])
     stds = weights[1::3]
     averages = weights[2::3]
     for alpha, beta, w in zip(stds, averages, weights[3::3]):
         a = self.saving_batch_normalization(z, alpha, beta)
         a_r = relu(a)
         z = np.dot(a_r, w)
     return softmax(z)
Example #28
    def classifier_predict_proba(X, feature_groups, model, intercept):
        log_odds_vector = EBMUtils.decision_function(X, feature_groups, model,
                                                     intercept)

        # Handle binary classification case -- softmax only works with 0s appended
        if log_odds_vector.ndim == 1:
            log_odds_vector = np.c_[np.zeros(log_odds_vector.shape),
                                    log_odds_vector]

        return softmax(log_odds_vector)
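As the comment above notes, a 1D vector of binary log-odds is padded with a zero column before the softmax; the resulting second column is exactly the logistic sigmoid of the log-odds. A minimal check of that equivalence, assuming scipy's expit and a row-wise softmax:

import numpy as np
from scipy.special import expit, softmax

z = np.random.randn(6)                # hypothetical binary log-odds
padded = np.c_[np.zeros_like(z), z]   # same zero-padding as above
print(np.allclose(softmax(padded, axis=1)[:, 1], expit(z)))   # expected: True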
Example #29
 def apply_activation(self, in_arr, activation):
     if activation == "sigmoid":
         return 1.0 / (1 + np.exp(-in_arr))
     elif activation == "relu":
         out_arr = np.maximum(0, in_arr)
         return out_arr
     elif activation == "softmax":
         return softmax(in_arr)
     elif activation == 'tanh':
         return np.tanh(in_arr)
     else:
         raise ValueError("Unsupported activation: " + str(activation))
Example #30
 def predict_proba(self, X):
     if self._strategy == 'ova':
         scores = self.score_samples(X)
         smin = np.min(scores, axis=-1, keepdims=True)
         smax = np.max(scores, axis=-1, keepdims=True)
         scores = (scores - smin) / (smax - smin)
         proba = softmax(scores)
     elif self._strategy == 'all':
         proba = self.score_samples(X)
     return proba
Example #31
 def predict_proba(self, X):
   if self._strategy == 'ova':
     scores = self.score_samples(X)
     smin = np.min(scores, axis=-1, keepdims=True)
     smax = np.max(scores, axis=-1, keepdims=True)
     scores = (scores - smin) / (smax - smin)
     proba = softmax(scores)
   elif self._strategy == 'all':
     proba = self.score_samples(X)
   return proba
Example #32
 def classifier_predict_proba(X, estimator, skip_attr_set_idxs=[]):
     log_odds_vector = EBMUtils.decision_function(
         X,
         estimator.attribute_sets_,
         estimator.attribute_set_models_,
         estimator.intercept_,
         skip_attr_set_idxs,
     )
     log_odds_trans = np.c_[-log_odds_vector, log_odds_vector]
     scores = softmax(log_odds_trans, copy=True)
     return scores
Example #33
    def predict_proba(self, X):
        """Predict proba using softmax.

        Parameters
        ----------
        X : ndarray, shape (n_trials, n_channels, n_channels)
            ndarray of SPD matrices.

        Returns
        -------
        prob : ndarray, shape (n_trials, n_classes)
            the softmax probabilities for each class.
        """
        return softmax(-self._predict_distances(X))
Example #34
    def predict_proba(self, X):
        """
        Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes].
            The class probabilities of the input samples.
            The order of the classes corresponds to that in the attribute classes_.
        """
        if self._fitted is None:
            raise NotFittedError(_NOT_FITTED_ERROR_DESC)
        X = check_array(X, accept_sparse=True)
        n_features = X.shape[1]
        if self._n_features != n_features:
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is %s and "
                             "input n_features is %s "
                             % (self._n_features, n_features))
        if self._n_classes == 2:
            y = self._estimators[0].predict_proba(X)
            y = _sigmoid(y)
            y = np.c_[y, 1 - y]
        else:
            y = np.zeros((X.shape[0], self._n_classes))
            for i, clf in enumerate(self._estimators):
                class_proba = clf.predict_proba(X)
                y[:, i] = class_proba

            # Honestly, I don't know which is better for computing the
            # probability: softmax or a normalized sigmoid.
            if self.calc_prob == "sigmoid":
                y = _sigmoid(y)
                normalizer = np.sum(y, axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                y /= normalizer
            else:
                y = softmax(y)
        return y
Example #35
    def predict_proba(self, X):
        """
        Predict class probabilities for X.

        The predicted class probabilities of an input sample are computed.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes].
            The class probabilities of the input samples.
            The order of the classes corresponds to that in the attribute classes_.
        """
        if not hasattr(self, '_fitted') or not self._fitted:
            raise NotFittedError(NOT_FITTED_ERROR_DESC)
        X = check_array(X, accept_sparse=True)
        self._check_n_features(X.shape[1])

        if self._n_classes == 2:
            y = self._estimators[0].predict(X)
            y = sigmoid(y)
            y = np.c_[y, 1 - y]
        else:
            y = np.zeros((X.shape[0], self._n_classes))
            for i, clf in enumerate(self._estimators):
                class_proba = clf.predict(X)
                y[:, i] = class_proba

            if self.calc_prob == "sigmoid":
                y = sigmoid(y)
                normalizer = np.sum(y, axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                y /= normalizer
            else:
                y = softmax(y)
        return y
Example #36
def _predict_proba(lr, X):
    pred = safe_sparse_dot(X, lr.coef_.T)
    if hasattr(lr, "intercept_"):
        pred += lr.intercept_
    return softmax(pred)
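A hedged sanity check for this helper, assuming safe_sparse_dot and softmax come from sklearn.utils.extmath: for a multinomial LogisticRegression with more than two classes, the result should match the estimator's own predict_proba.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

X, y = load_iris(return_X_y=True)
lr = LogisticRegression(max_iter=1000).fit(X, y)   # 3 classes -> multinomial softmax
print(np.allclose(_predict_proba(lr, X), lr.predict_proba(X)))   # expected: True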
Example #37
def test_softmax():
    rng = np.random.RandomState(0)
    X = rng.randn(3, 5)
    exp_X = np.exp(X)
    sum_exp_X = np.sum(exp_X, axis=1).reshape((-1, 1))
    assert_array_almost_equal(softmax(X), exp_X / sum_exp_X)
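For reference, a quick check that sklearn.utils.extmath.softmax and scipy.special.softmax with axis=1 implement the same row-wise operation the test above verifies:

import numpy as np
from scipy.special import softmax as scipy_softmax
from sklearn.utils.extmath import softmax as sklearn_softmax

X = np.random.RandomState(0).randn(3, 5)
print(np.allclose(sklearn_softmax(X), scipy_softmax(X, axis=1)))   # expected: True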