Example #1
 def apply_anchors_ver1(pred: np.ndarray, stride: int,
                        anchor: List[Tuple[int, int]],
                        xyscale: float) -> np.ndarray:
     assert len(pred.shape) == 3
     assert pred.shape[2] == 255
     # align the axes to (x, y, anchor, data)
     pred = pred.reshape((*pred.shape[:2], 3, 85))
     # calc grid of anchor box
     anchor_grid = np.array(anchor)[np.newaxis, np.newaxis, :, :]
     grid = np.meshgrid(np.arange(pred.shape[1]), np.arange(pred.shape[0]))
     grid = np.stack(grid, axis=-1)[:, :, np.newaxis, :]
     # xy: min_x, min_y
     # wh: width, height
     # conf: confidence score of the bounding box
     # prob: probability for each category
     xy, wh, conf, prob = np.split(pred, (2, 4, 5), axis=-1)
     # apply anchor
     xy = ((sigmoid(xy) * xyscale) - (0.5 * (xyscale - 1)) + grid) * stride
     wh = np.exp(wh) * anchor_grid
     conf = sigmoid(conf)
     prob = sigmoid(prob)
     # concat
     bbox = np.concatenate([xy, wh, conf, prob], axis=-1)
     # expand all anchors
     bbox = np.reshape(bbox, (-1, 85))
     return bbox
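Most snippets on this page call a one-argument sigmoid helper that is never defined here (Example #15 uses a parameterized curve-fitting variant, covered later). A minimal sketch of a numerically stable NumPy version consistent with those calls; the implementation is an assumption, not taken from any of the snippets:

import numpy as np

def sigmoid(x: np.ndarray) -> np.ndarray:
    # Numerically stable logistic function 1 / (1 + exp(-x)):
    # branching on the sign of x avoids overflow in np.exp for large |x|.
    out = np.empty_like(x, dtype=np.float64)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    exp_x = np.exp(x[~pos])
    out[~pos] = exp_x / (1.0 + exp_x)
    return out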
Example #2
 def backprop(self, x, y):
     """Return a tuple ``(nabla_b, nabla_w)`` representing the
     gradient for the cost function C_x.  ``nabla_b`` and
     ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
     to ``self.biases`` and ``self.weights``."""
     nabla_b = [np.zeros(b.shape) for b in self.biases]
     nabla_w = [np.zeros(w.shape) for w in self.weights]
     # feedforward
     activation = x
     activations = [x]  # list to store all the activations, layer by layer
     zs = []  # list to store all the z vectors, layer by layer
     for b, w in zip(self.biases, self.weights):
         z = np.dot(w, activation) + b
         zs.append(z)
         activation = sigmoid(z)
         activations.append(activation)
     # backward pass
     delta = self.cost_derivative(activations[-1], y) * \
             sigmoid_prime(zs[-1])
     nabla_b[-1] = delta
     nabla_w[-1] = np.dot(delta, activations[-2].transpose())
     # Note that the variable l in the loop below is used a little
     # differently to the notation in Chapter 2 of the book.  Here,
     # l = 1 means the last layer of neurons, l = 2 is the
     # second-last layer, and so on.  It's a renumbering of the
     # scheme in the book, used here to take advantage of the fact
     # that Python can use negative indices in lists.
     for l in range(2, self.num_layers):
         z = zs[-l]
         sp = sigmoid_prime(z)
         delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
         nabla_b[-l] = delta
         nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
     return (nabla_b, nabla_w)
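Examples #2 and #5 also rely on sigmoid_prime, the derivative of the logistic function. A short sketch consistent with the sigmoid helper above (assumed; it is not shown in the original source):

def sigmoid_prime(z):
    # sigma'(z) = sigma(z) * (1 - sigma(z))
    s = sigmoid(z)
    return s * (1.0 - s)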
Example #3
 def on_batch_end(self, state: State):
     #  print(type(state.model))
     # if not isinstance(state.model, tta.wrappers.SegmentationTTAWrapper):
     #    print("Not instance of tta")
     #    exit()
     for prob in state.batch_out["logits"]:
         for probability in prob:
             probability = probability.detach().cpu().numpy()
             if probability.shape != (350, 525):
                 probability = cv2.resize(
                     probability, dsize=(525, 350), interpolation=cv2.INTER_LINEAR
                 )
             prediction, num_predict = post_process(
                 sigmoid(probability),
                 threshold=self.threshold,
                 min_size=self.min_size,
             )
             if num_predict == 0:
                 self.pred_distr[-1] += 1
                 self.encoded_pixels[self.image_id] = ""
             else:
                 self.pred_distr[self.image_id % 4] += 1
                 r = mask2rle(prediction)
                 self.encoded_pixels[self.image_id] = r
             self.image_id += 1
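This example and Examples #9, #11, and #12 call post_process and mask2rle without defining them. In the Kaggle cloud-segmentation kernels these snippets resemble, the helpers threshold the probability map, drop small connected components, and run-length-encode the mask; a sketch under those assumptions:

import cv2
import numpy as np

def post_process(probability, threshold, min_size):
    # Binarize, then keep only connected components larger than min_size pixels.
    mask = (probability > threshold).astype(np.uint8)
    num_components, components = cv2.connectedComponents(mask)
    predictions = np.zeros(probability.shape, np.float32)
    num_predict = 0
    for c in range(1, num_components):
        component = components == c
        if component.sum() > min_size:
            predictions[component] = 1
            num_predict += 1
    return predictions, num_predict

def mask2rle(img):
    # Run-length encoding, column-major and 1-indexed, the format
    # used by Kaggle segmentation competitions.
    pixels = img.T.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return " ".join(str(x) for x in runs)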
Example #4
    def run_test_epoch(self):
        self.model.eval()
        test_preds = np.zeros((len(self.test_loader.dataset)))

        with torch.no_grad():
            for i, (x_batch,) in enumerate(self.test_loader):
                y_pred = self.model(x_batch).detach()
                test_preds[i * self.batch_size:(i + 1) * self.batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]

        return test_preds
Example #5
 def backprop_matrix(self, x, y):
     """Return a tuple ``(nabla_b, nabla_w)`` representing the
     gradient for the cost function C.  ``nabla_b`` and
     ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
     to ``self.biases`` and ``self.weights``.
     x.shape is (mini_batch_size, input_dim)
     y.shape is (mini_batch_size, output_dim)"""
     nabla_b = [np.zeros(b.shape) for b in self.biases]
     nabla_w = [np.zeros(w.shape) for w in self.weights]
     # feedforward
     y = np.transpose(y)
     x = np.transpose(x)
     # activation.shape=(neuron_num_of_pre_layer, mini_batch_size)
     activation = x
     activations = [x]  # list to store all the activations, layer by layer
     zs = []  # list to store all the z vectors, layer by layer
     for b, w in zip(self.biases, self.weights):
         # w.shape=(neuron_num_of_current_layer,neuron_num_of_pre_layer)
         # b.shape=(neuron_num_of_current_layer,1)
         # z.shape=(neuron_num_of_current_layer,mini_batch_size)
         z = np.einsum('ij,jk', w, activation) + b  # b can be broadcasted
         zs.append(z)
         activation = sigmoid(z)
         activations.append(activation)
     # backward pass
     # delta.shape=(neuron_num_of_current_layer,mini_batch_size)
     delta = self.cost_derivative(activations[-1], y) * \
             sigmoid_prime(zs[-1])
     # nabla_b[-1].shape=(neuron_num_of_current_layer,1) after summing over the batch
     nabla_b[-1] = delta.sum(1)[:, np.newaxis]
     # nabla_w[-1].shape=(neuron_num_of_current_layer,neuron_num_of_pre_layer)
     nabla_w[-1] = np.dot(delta, activations[-2].transpose())
     # Note that the variable l in the loop below is used a little
     # differently to the notation in Chapter 2 of the book.  Here,
     # l = 1 means the last layer of neurons, l = 2 is the
     # second-last layer, and so on.  It's a renumbering of the
     # scheme in the book, used here to take advantage of the fact
     # that Python can use negative indices in lists.
     for l in range(2, self.num_layers):
         z = zs[-l]
         # sp (pre,bat)
         sp = sigmoid_prime(z)
         # weights.T (pre,cur)
         # delta (cur,bat)
         # sp (pre,bat)
         delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
         # delta (pre,bat)
         nabla_b[-l] = delta.sum(1)[:, np.newaxis]
         nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
     # each nabla_w[i] has shape (cur,pre)
     # each nabla_b[i] has shape (cur,1)
     return (nabla_b, nabla_w)
Example #6
    def run_validation_epoch(self):
        self.model.eval()
        avg_val_loss = 0.
        valid_preds = np.zeros((len(self.valid_loader.dataset)))

        with torch.no_grad():
            for i, (x_batch, y_batch) in enumerate(self.valid_loader):
                y_pred = self.model(x_batch).detach()
                avg_val_loss += self.loss(y_pred, y_batch).item() / len(self.valid_loader)
                valid_preds[i * self.batch_size:(i + 1) * self.batch_size] = sigmoid(y_pred.cpu().numpy())[:, 0]
        search_result = threshold_search(self.y_val.cpu().numpy(), valid_preds)

        val_f1, val_threshold = search_result['f1'], search_result['threshold']

        return valid_preds, avg_val_loss, val_f1
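run_validation_epoch depends on a threshold_search helper that is not shown. A plausible sketch that scans a threshold grid and returns the best F1 score; the dict keys match the lookup above, but the grid and the metric source are assumptions:

import numpy as np
from sklearn.metrics import f1_score

def threshold_search(y_true, y_proba):
    # Try thresholds on a coarse grid and keep the one with the best F1.
    best_threshold, best_f1 = 0.0, 0.0
    for threshold in np.arange(0.1, 0.9, 0.01):
        f1 = f1_score(y_true, (y_proba > threshold).astype(int))
        if f1 > best_f1:
            best_threshold, best_f1 = threshold, f1
    return {"threshold": best_threshold, "f1": best_f1}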
Example #7
def evaluate(model, valid_loader, loss_fn, valid_preds_fold, feats,
             kfold_X_valid_features):

    model.eval()

    avg_val_loss = 0.
    for i, (x_batch, y_batch, index) in enumerate(valid_loader):
        if feats:
            f = kfold_X_valid_features[index]
            y_pred = model([x_batch, f]).detach()
        else:
            y_pred = model(x_batch).detach()

        avg_val_loss += loss_fn(y_pred, y_batch).item() / len(valid_loader)
        valid_preds_fold[index] = sigmoid(y_pred.cpu().numpy())[:, 0]

    return avg_val_loss, valid_preds_fold
Example #8
def test(model, test_loader, train_preds, valid_idx, test_preds,
         valid_preds_fold, test_preds_fold, splits, feats, test_features):

    for i, (x_batch, ) in enumerate(test_loader):
        if feats:
            f = test_features[i * batch_size:(i + 1) * batch_size]
            y_pred = model([x_batch, f]).detach()
        else:
            y_pred = model(x_batch).detach()

        test_preds_fold[i * batch_size:(i + 1) * batch_size] = sigmoid(
            y_pred.cpu().numpy())[:, 0]

    train_preds[valid_idx] = valid_preds_fold
    test_preds += test_preds_fold / len(splits)

    return test_preds
Example #9
    def on_stage_end(self, state: State):
        class_params = {}
        for class_id in range(4):
            print(class_id)
            attempts = []
            for t in range(0, 100, 10):
                t /= 100
                for ms in [
                    0,
                    1000,
                    5000,
                    10000,
                    11000,
                    14000,
                    15000,
                    16000,
                    18000,
                    19000,
                    20000,
                    21000,
                    23000,
                    25000,
                    27000,
                ]:
                    masks = []
                    for i in range(class_id, len(self.probabilities), 4):
                        probability = self.probabilities[i]
                        predict, num_predict = post_process(sigmoid(probability), t, ms)
                        masks.append(predict)

                    d = []
                    for i, j in zip(masks, self.valid_masks[class_id::4]):
                        d.append(single_dice_coef(y_pred_bin=i, y_true=j))

                    attempts.append((t, ms, np.mean(d)))

            attempts_df = pd.DataFrame(attempts, columns=["threshold", "size", "dice"])

            attempts_df = attempts_df.sort_values("dice", ascending=False)
            print(attempts_df.head())
            best_threshold = attempts_df["threshold"].values[0]
            best_size = attempts_df["size"].values[0]

            class_params[class_id] = (best_threshold, best_size)
            np.save("./logs/class_params.npy", class_params)
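The grid search above scores each (threshold, size) pair with single_dice_coef, which this page never defines. A standard Dice implementation matching the keyword arguments used in the loop (assumed):

import numpy as np

def single_dice_coef(y_pred_bin, y_true):
    # Dice = 2*|A & B| / (|A| + |B|); treat empty-vs-empty as a perfect match.
    intersection = np.sum(y_true * y_pred_bin)
    denom = np.sum(y_true) + np.sum(y_pred_bin)
    return 1.0 if denom == 0 else 2.0 * intersection / denom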
Example #10
 def apply_anchors_ver2(pred: np.ndarray, stride: int,
                        anchor: List[Tuple[int, int]],
                        xyscale: float) -> np.ndarray:
     assert len(pred.shape) == 3
     assert pred.shape[2] == 255
     # align the axes to (x, y, anchor, data)
     pred = pred.reshape((*pred.shape[:2], 3, 85))
     # calc grid of anchor box
     anchor_grid = np.array(anchor)[np.newaxis, np.newaxis, :, :]
     grid = np.meshgrid(np.arange(pred.shape[1]), np.arange(pred.shape[0]))
     grid = np.stack(grid, axis=-1)[:, :, np.newaxis, :]
     # apply anchor
     pred = sigmoid(pred)
     pred[..., :2] = ((pred[..., :2] * xyscale) -
                      (0.5 * (xyscale - 1)) + grid) * stride
     pred[..., 2:4] = ((pred[..., 2:4] * xyscale)**2) * anchor_grid
     # expand all anchors
     bbox = np.reshape(pred, (-1, 85))
     return bbox
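Both apply_anchors variants take one raw YOLO head output of shape (H, W, 255), the stride of that head, three (width, height) anchors, and an xy scale. A hypothetical call with typical YOLOv4 values for the stride-8 head; the concrete numbers are illustrative, not taken from this page:

import numpy as np

head = np.random.randn(52, 52, 255).astype(np.float32)  # dummy 52x52 grid, 3 anchors * 85 channels
anchors = [(12, 16), (19, 36), (40, 28)]                 # illustrative (w, h) pairs
boxes = apply_anchors_ver2(head, stride=8, anchor=anchors, xyscale=1.2)
print(boxes.shape)  # (52 * 52 * 3, 85) = (8112, 85)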
Example #11
    def on_batch_end(self, state: State):
        preds_write = [False, False, False, False]
        allow = True
        for prob in state.batch_out["logits"]:
            for probability in prob:
                probability = probability.detach().cpu().numpy()
                probability = sigmoid(probability)
                pseudo_label = np.copy(probability)
                ones_condition = (pseudo_label < self.low_threshold) | (
                    pseudo_label > self.high_threshold
                )

                pseudo_label[ones_condition] = 1
                pseudo_label[~ones_condition] = 0
                val = (
                    pseudo_label.sum()
                    * 100
                    / (pseudo_label.shape[0] * pseudo_label.shape[1])
                )
                if val < self.good_pixel_threshold:
                    allow = False

                preds_write[self.image_id % 4] = (
                    probability,
                    self.sub.iloc[self.image_id]["Image_Label"],
                )
                self.image_id += 1

                if self.image_id % 4 == 0:
                    if allow:
                        for probability_new, name in preds_write:
                            self.names_pl.append(name)
                            predict_pl, num_predict_pl = post_process(
                                probability_new,
                                self.activation_threshold,
                                self.mask_size_threshold,
                            )
                            if num_predict_pl == 0:
                                self.encoded_pixels_pl.append("")
                            else:
                                r_pl = mask2rle(predict_pl)
                                self.encoded_pixels_pl.append(r_pl)
                    allow = True
Example #12
def infer(model, testloader, class_params, output_path):
    encoded_pixels = []
    pred_distr = {-1: 0, 0: 0, 1: 0, 2: 0, 3: 0}
    image_id = 0
    model.eval()
    with torch.no_grad():
        for images, _ in tqdm.tqdm(testloader, total=len(testloader)):
            masks = model(images)
            for mask in masks:
                mask = mask.cpu().detach().numpy()
                if mask.shape != (350, 525):
                    mask = cv2.resize(
                        mask, dsize=(525, 350), interpolation=cv2.INTER_LINEAR
                    )
                mask, num_predict = post_process(
                    sigmoid(mask),
                    class_params[image_id % 4][0],
                    class_params[image_id % 4][1],
                )
                if num_predict == 0:
                    pred_distr[-1] += 1
                    encoded_pixels.append("")
                else:
                    pred_distr[image_id % 4] += 1
                    r = mask2rle(mask)
                    encoded_pixels.append(r)
                image_id += 1

    print(
        f"empty={pred_distr[-1]} fish={pred_distr[0]} flower={pred_distr[1]} gravel={pred_distr[2]} sugar={pred_distr[3]}"
    )
    non_empty = pred_distr[0] + pred_distr[1] + pred_distr[2] + pred_distr[3]
    total = non_empty + pred_distr[-1]  # "total", not "all", to avoid shadowing the builtin
    sub = pd.read_csv(f"{output_path}/sample_submission.csv")
    sub["EncodedPixels"] = encoded_pixels
    sub.to_csv(
        f"submission_{round(non_empty/all, 3)}.csv",
        columns=["Image_Label", "EncodedPixels"],
        index=False,
    )
Example #13
 def forward(self, batch):
     # np.array over a bare generator builds a useless 0-d object array; use a list
     self.X = np.array([sigmoid(x) for x in batch])
     return self.X
Example #14
 def feedforward(self, a):
     for w, b in zip(self.weights, self.biases):
         a = sigmoid(np.dot(w, a) + b)
     return a
Example #15
import numpy as np
import matplotlib.pyplot as plt

import utils

if __name__ == '__main__':
    df = utils.get_ts_data()
    il_sr = df.loc['Israel']
    il_sr_nozeros = utils.rm_zeros(il_sr)

    n_days = il_sr_nozeros.size
    x_days = list(range(n_days))

    l_guess = 0.65 * 9 * (10**6)
    k_guess = 1.0
    x0_guess = n_days if n_days <= 60 else n_days / 2

    params_opt, _ = utils.estimate_sigmoid_params(x_days, il_sr_nozeros.values,
                                                  [l_guess, k_guess, x0_guess])

    opt_l, opt_k, opt_x0 = params_opt

    x_range = np.linspace(0, n_days + 3, 1000)  # opt_x0 * 2

    print(opt_l)

    plt.figure()
    plt.plot(range(n_days), il_sr_nozeros, label='Data', marker='o')
    plt.plot(x_range,
             utils.sigmoid(x_range, *params_opt),
             'r-',
             ls='-',
             label="Sigmoid Fit")
    plt.legend()
    plt.show()
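Example #15 fits a logistic growth curve, so the utils.sigmoid used there takes curve parameters rather than a bare input. A sketch of the assumed signatures, with estimate_sigmoid_params as a thin wrapper over scipy's least-squares fit:

import numpy as np
from scipy.optimize import curve_fit

def sigmoid(x, l, k, x0):
    # Logistic growth curve: capacity l, steepness k, midpoint x0.
    return l / (1.0 + np.exp(-k * (np.asarray(x, dtype=float) - x0)))

def estimate_sigmoid_params(x, y, p0):
    # Returns (optimal_params, covariance), matching the unpacking in Example #15.
    return curve_fit(sigmoid, x, y, p0=p0)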
Example #16
import json

import numpy as np
from sklearn.metrics import auc  # assumed source of auc; the snippet never imports it

from src.utils import L_ex, sigmoid, roc_curve, get_path


path = get_path(__file__) + '/..'

w = np.array([-410.6073, 0.1494, 4.4185])

# D_ex, the data matrix these indices select from, is assumed to be loaded elsewhere
idxs = [L_ex.index(f) for f in ['sdE5', 'V11', 'E9']]
Xf = D_ex[:, idxs]

num_tests = 5
results = []

for i in range(num_tests):
    # np.random.random_integers is long removed, and the size argument must be an int
    test_rows = np.random.randint(0, D_ex.shape[0], int(1e5))

    X = Xf[test_rows, :]
    y = D_ex[test_rows, 2]

    lin = np.dot(X, w)
    probs = sigmoid(lin)

    fpr, tpr, thresholds = roc_curve(y, probs, thresholds=np.linspace(0, 1, 1000))

    results.append(auc(fpr, tpr))


json_path = '{0}/data/hard-coded-results-{1}-tests.json'.format(path, num_tests)
with open(json_path, 'w') as f:
    json.dump(results, f, indent=4)
Example #17
 def __call__(self, input):
     """Forward pass of the logistic classifier.
     args:
         input (np.array) : row matrix of samples
     """
     return sigmoid(np.dot(input, self.weights))
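For completeness, a hypothetical use of this classifier: input is a row matrix of samples, so with a weight vector of length n_features the call returns one probability per row. Any bias term would have to be appended as an extra feature column, since the forward pass is a bare dot product; LogisticClassifier is an assumed name for the class owning __call__:

import numpy as np

clf = LogisticClassifier()  # hypothetical owning class
clf.weights = np.array([0.5, -1.2, 2.0])
X = np.array([[1.0, 0.0, 0.5],
              [0.2, 1.5, -0.3]])
probs = clf(X)  # sigmoid(X @ weights), shape (2,)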