Code example #1
0
def predict_contacts(model, x, y, use_cuda):
    """Embed a batch of sequences and return, per sequence, the flattened
    contact logits and matching labels with masked positions removed.

    Labels < 0 mark positions to ignore; those entries are filtered from
    both the logits and the label vector.  Returns (logits, y_list), two
    parallel lists with one entry per input sequence.
    """
    n = len(x)
    packed, order = pack_sequences(x)
    # re-wrap the packed data (legacy Variable API) before embedding
    packed = PackedSequence(Variable(packed.data), packed.batch_sizes)
    embeddings = unpack_sequences(model(packed), order)

    logits = []
    y_list = []
    for i in range(n):
        scores = model.predict(embeddings[i].unsqueeze(0)).view(-1)

        target = y[i].view(-1)
        if use_cuda:
            target = target.cuda()
        # keep only positions whose label is not masked (< 0)
        valid = ~(target < 0)

        logits.append(scores[valid])
        y_list.append(target[valid])

    return logits, y_list
Code example #2
0
def predict_minibatch(model, x, use_cuda):
    """Embed each sequence in the minibatch and return one square
    (L, L) contact-logit matrix per sequence.

    `use_cuda` is accepted for interface consistency with the sibling
    prediction helpers; this function does not move data itself.
    """
    packed, order = pack_sequences(x)
    packed = PackedSequence(packed.data, packed.batch_sizes)
    embeddings = unpack_sequences(model(packed), order)

    # reshape each flat prediction back into an L x L matrix
    return [
        model.predict(zi.unsqueeze(0)).view(zi.size(0), zi.size(0))
        for zi in embeddings
    ]
Code example #3
0
def contacts_grad(model, x, y, use_cuda, weight=0.5):
    """Compute the contact-prediction BCE loss over a minibatch, backprop
    a weighted version of it, and return summary statistics.

    Parameters
    ----------
    model : module exposing ``predict``; embeds packed sequences
    x : list of input sequences (consumed by ``pack_sequences``)
    y : list of label tensors; entries < 0 mark masked positions
    use_cuda : move the concatenated labels to GPU when True
    weight : scale applied to the loss before ``backward()``

    Returns
    -------
    tuple ``(loss, tp, gp, pp, b)`` — scalar loss value, soft true
    positives, ground-truth positives, predicted positives, and the
    number of unmasked positions contributing to the loss.
    """
    b = len(x)
    x, order = pack_sequences(x)
    x = PackedSequence(Variable(x.data), x.batch_sizes)
    z = model(x)  # embed the sequences
    z = unpack_sequences(z, order)

    # flatten the per-sequence contact logits into one long vector
    logits = []
    for i in range(b):
        zi = z[i]
        lp = model.predict(zi.unsqueeze(0)).view(-1)
        logits.append(lp)
    logits = torch.cat(logits, 0)

    y = torch.cat([yi.view(-1) for yi in y])
    if use_cuda:
        y = y.cuda()
    mask = (y < 0)  # labels < 0 are ignored positions

    logits = logits[~mask]
    y = Variable(y[~mask])
    b = y.size(0)  # rebound: count of unmasked positions

    loss = F.binary_cross_entropy_with_logits(logits, y)

    # backprop weighted loss
    w_loss = loss * weight
    w_loss.backward()

    # calculate the recall and precision statistics (no grad tracking)
    with torch.no_grad():
        # fix: F.sigmoid is deprecated (removed in newer PyTorch);
        # torch.sigmoid is the supported equivalent
        p_hat = torch.sigmoid(logits)
        tp = torch.sum(p_hat * y).item()
        gp = y.sum().item()
        pp = p_hat.sum().item()

    return loss.item(), tp, gp, pp, b