# Exemplo n.º 1
def compute_fixed_recursive_loss(model, batch, S_log):
    """Point-supervision loss over single-point blobs, weighted by the model's
    own predicted probabilities.

    For every blob that contains exactly one annotated point, build a target
    map ``T`` (the blob's pixels labelled with its class id + 1, background 0)
    and a weight map ``W`` (the predicted per-pixel probability of that class),
    then accumulate the weighted, blob-normalized NLL loss.

    Args:
        model: wrapper whose ``base_model`` is passed to ``get_blob_dict``.
        batch: input batch forwarded to ``get_blob_dict``.
        S_log: log-softmax output tensor the NLL loss is computed against.

    Returns:
        Scalar tensor (or 0. when no single-point blob exists).
    """
    blob_dict = get_blob_dict(model.base_model, batch, training=True)

    blobs = blob_dict["blobs"]
    probs = ms.t2n(blob_dict["probs"])
    point_loss = 0.

    for b in blob_dict["blobList"]:
        # Only blobs with exactly one annotated point are supervised here.
        if b["n_points"] != 1:
            continue

        T = np.zeros(blobs.shape[-2:])
        W = np.zeros(blobs.shape[-2:])

        ind = blobs[b["class"]] == b["blob_id"]
        T[ind] = (b["class"] + 1)
        # Weight each pixel by the predicted probability of the blob's class.
        W[ind] = probs[b["class"] + 1][ind]

        # NOTE: the removed `reduce=False` keyword is replaced by the
        # equivalent modern `reduction="none"` (per-pixel losses).
        b_loss = F.nll_loss(
            S_log, ms.n2l(T[None]), ignore_index=0,
            reduction="none") * torch.FloatTensor(W).cuda()

        # Normalize by the blob's pixel count so large blobs don't dominate.
        point_loss += (b_loss.sum() / float(ind.sum()))

    return point_loss
# Exemplo n.º 2
def compute_split_loss(S_log,
                       S,
                       points,
                       blob_dict,
                       split_mode="line",
                       return_mask=False):
    """Split-level loss penalizing blobs that swallow two or more point
    annotations.

    For each blob with >= 2 points, a boundary target ``T`` is computed either
    by line splitting (``split_mode="line"``) or watershed splitting
    (``split_mode="water"``); pixels on the split boundary (T == 0) are pushed
    toward background via NLL loss, scaled by the number of points.

    Args:
        S_log: log-softmax output tensor.
        S: softmax probability tensor; ``S[0]`` is converted to numpy.
        points: point-annotation tensor.
        blob_dict: dict with "blobs" and "blobList" entries.
        split_mode: "line" or "water"; anything else raises ValueError.
        return_mask: when True, return a boolean mask of split-boundary
            pixels instead of the loss.

    Returns:
        Scalar loss, or (if ``return_mask``) a boolean ndarray marking the
        boundary pixels.

    Raises:
        ValueError: if ``split_mode`` is not recognized.
    """
    blobs = blob_dict["blobs"]
    S_numpy = ms.t2n(S[0])
    points_npy = ms.t2n(points).squeeze()

    loss = 0.

    if return_mask:
        mask = np.ones(points_npy.shape)

    for b in blob_dict["blobList"]:
        # Only blobs containing at least two points need splitting.
        if b["n_points"] < 2:
            continue

        c = b["class"] + 1
        probs = S_numpy[c]

        points_class = (points_npy == c).astype("int")
        blob_ind = blobs[b["class"]] == b["blob_id"]

        if split_mode == "line":
            T = sp.line_splits(probs * blob_ind, points_class * blob_ind)
        elif split_mode == "water":
            # watersplit marks boundary pixels with 1; invert so the
            # boundary is 0 (the class pushed toward background below).
            T = sp.watersplit(probs, points_class * blob_ind) * blob_ind
            T = 1 - T
        else:
            raise ValueError("unknown split_mode: %s" % split_mode)

        if return_mask:
            mask[T == 0] = 0

        # Scale the penalty by the number of points the blob swallowed.
        scale = b["n_points"] + 1
        # NOTE: `reduction="elementwise_mean"` was renamed to "mean" in torch.
        loss += float(scale) * F.nll_loss(S_log,
                                          ms.n2l(T)[None],
                                          ignore_index=1,
                                          reduction="mean")

    if return_mask:
        return (mask == 0)

    return loss
# Exemplo n.º 3
def compute_recursive_blob_loss(batch, S_log, blob_dict):
    """Positive-blob loss: pixels of each single-point blob are pushed toward
    the blob's class.

    Args:
        batch: unused here; kept for interface compatibility with callers.
        S_log: log-softmax output tensor.
        blob_dict: dict with "blobs" (per-class blob label maps) and
            "blobList" (per-blob metadata).

    Returns:
        Scalar tensor (or 0. when no single-point blob exists).
    """
    blobs = blob_dict["blobs"]
    point_loss = 0.

    for b in blob_dict["blobList"]:
        # Only blobs with exactly one annotated point are supervised here.
        if b["n_points"] != 1:
            continue

        T = np.zeros(blobs.shape[-2:])

        # Label the blob's pixels with its class id (+1 so background stays 0).
        T[blobs[b["class"]] == b["blob_id"]] = (b["class"] + 1)

        # NOTE: `reduction="elementwise_mean"` was renamed to "mean" in torch.
        point_loss += F.nll_loss(S_log,
                                 ms.n2l(T[None]),
                                 ignore_index=0,
                                 reduction="mean")

    return point_loss
# Exemplo n.º 4
def compute_fp_loss(S_log, blob_dict):
    """False-positive loss: blobs containing no annotated point are pushed
    toward background (class 0).

    Args:
        S_log: log-softmax output tensor.
        blob_dict: dict with "blobs" (per-class blob label maps) and
            "blobList" (per-blob metadata).

    Returns:
        Scalar tensor (or 0. when every blob contains a point).
    """
    blobs = blob_dict["blobs"]

    scale = 1.
    loss = 0.

    for b in blob_dict["blobList"]:
        # Blobs with at least one point are true positives; skip them.
        if b["n_points"] != 0:
            continue

        # Target: blob pixels -> background (0); everything else ignored (1).
        T = np.ones(blobs.shape[-2:])
        T[blobs[b["class"]] == b["blob_id"]] = 0

        # NOTE: `reduction="elementwise_mean"` was renamed to "mean" in torch.
        loss += scale * F.nll_loss(S_log,
                                   ms.n2l(T[None]),
                                   ignore_index=1,
                                   reduction="mean")
    return loss
# Exemplo n.º 5
def compute_boundary_loss(S_log,
                          S,
                          points,
                          counts,
                          add_bg=False,
                          return_mask=False):
    """Global split loss: watershed boundaries between all points of a class
    are pushed toward background.

    Args:
        S_log: log-softmax output tensor.
        S: softmax probability tensor; ``S[0]`` is converted to numpy.
        points: point-annotation tensor.
        counts: per-class point counts; the loss is scaled by their sum.
        add_bg: when True, also treat the minimum-probability pixels of each
            class map as background seeds for the watershed.
        return_mask: when True, return a boolean mask of boundary pixels
            instead of the loss.

    Returns:
        Scalar loss, or (if ``return_mask``) a boolean ndarray marking the
        watershed-boundary pixels.
    """
    S_npy = ms.t2n(S[0])
    points_npy = ms.t2n(points).squeeze()
    loss = 0.

    if return_mask:
        mask = np.ones(points_npy.shape)

    # Loop-invariant scale: total number of annotated points.
    scale = float(counts.sum())

    for c in range(S.shape[1]):
        # Channel 0 is background; no boundaries to compute there.
        if c == 0:
            continue

        points_class = (points_npy == c).astype(int)

        if add_bg:
            # Seed the watershed with the least-confident pixels as well.
            points_class[S_npy[c] == S_npy[c].min()] = 1

        # Watershed needs at least two seeds to produce a boundary.
        if points_class.sum() <= 1:
            continue

        # watersplit marks boundary pixels with 1; invert so the boundary
        # is 0 (the class pushed toward background below).
        T = sp.watersplit(S_npy[c], points_class)
        T = 1 - T

        if return_mask:
            mask[T == 0] = 0

        # NOTE: `reduction="elementwise_mean"` was renamed to "mean" in torch.
        loss += scale * F.nll_loss(S_log,
                                   ms.n2l(T)[None],
                                   ignore_index=1,
                                   reduction="mean")

    if return_mask:
        return (mask == 0)

    return loss