Example #1
def model_score(model, feed, threshold=1.0):
    import re

    import torch.nn.functional as F
    from sklearn.metrics import confusion_matrix

    # `model_predict`, `penalties` and `sparsity` are helpers assumed to be
    # defined elsewhere in the same module
    model.eval()
    pred, fact = model_predict(model, feed)

    # mean negative log-likelihood and the accumulated KL divergence penalty
    n_ll = F.nll_loss(pred, fact, reduction="mean")
    kl_d = sum(penalties(model))

    # fraction of parameters considered dropped at the given threshold
    f_sparsity = sparsity(model, hard=True, threshold=threshold)

    # C_{ij} = \hat{P}(y = i & \hat{y} = j)
    cm = confusion_matrix(fact.numpy(), pred.numpy().argmax(axis=-1))

    tp = cm.diagonal()
    # column sums count predictions per class (tp + fp),
    # row sums count true labels per class (tp + fn)
    fp, fn = cm.sum(axis=0) - tp, cm.sum(axis=1) - tp

    # format the arrays and remove clutter
    p_str = re.sub("[',]", "", str([f"{p:4.0%}" for p in tp / (tp + fp)]))
    r_str = re.sub("[',]", "", str([f"{p:4.0%}" for p in tp / (tp + fn)]))
    print(
        f"(S) {f_sparsity:.1%} ({float(kl_d):.2e}) "
        f"(A) {tp.sum() / cm.sum():.1%} ({n_ll.item():.2e})"
        f"\n(P) {p_str}"  # \approx (y = i \mid \hat{y} = i)
        f"\n(R) {r_str}"  # \approx (\hat{y} = i \mid y = i)
    )
    # print the confusion matrix with lone zeros rendered as dots for readability
    print(re.sub(r"(?<=\D)0", ".", str(cm)))

    return model
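
A minimal usage sketch for model_score, assuming a trained classifier `net`
that outputs log-probabilities and a held-out `test_feed` of whatever kind
`model_predict` consumes (both names are hypothetical):

    # prints sparsity, accuracy, per-class precision/recall and the
    # confusion matrix, and returns the model unchanged
    net = model_score(net, test_feed, threshold=1.0)
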
def model_train(X, y, model, n_steps=20000, threshold=1.0,
                reduction="mean", klw=1e-3, verbose=True):
    import tqdm

    import torch
    import torch.nn.functional as F

    model.train()
    optim = torch.optim.Adam(model.parameters())

    losses = []
    with tqdm.tqdm(range(n_steps)) as bar:
        for i in bar:
            optim.zero_grad()

            y_pred = model(X)

            # reconstruction loss plus the weighted KL divergence penalty
            mse = F.mse_loss(y_pred, y)
            kl_d = sum(penalties(model, reduction=reduction))

            loss = mse + klw * kl_d
            loss.backward()

            optim.step()

            losses.append(float(loss))
            if verbose:
                # compute the (relatively costly) hard sparsity only when
                # progress reporting is requested
                f_sparsity = sparsity(model, hard=True,
                                      threshold=threshold)
            else:
                f_sparsity = float("nan")

            bar.set_postfix_str(f"{f_sparsity:.1%} {float(mse):.3e} {float(kl_d):.3e}")
        # end for
    # end with
    return model.eval(), losses
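
A quick usage sketch for model_train on toy regression data; `model` stands in
for any torch.nn.Module that works with the `penalties` and `sparsity` helpers
(an assumption, since those are defined elsewhere):

    import torch

    X = torch.randn(256, 10)     # toy inputs
    y = X @ torch.randn(10, 1)   # toy linear targets

    # trains with Adam and reports sparsity / mse / kl in the progress bar
    model, losses = model_train(X, y, model, n_steps=5000, klw=1e-3)
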
Example #3
def model_fit(model, feed, optim, n_steps=100, threshold=1.0,
              klw=1e-3, reduction="mean", verbose=True):
    import tqdm

    import torch.nn.functional as F

    losses = []
    with tqdm.tqdm(range(n_steps)) as bar:
        model.train()
        for i in bar:
            for data, target in feed:
                optim.zero_grad()

                # classification loss plus the weighted KL divergence penalty
                n_ll = F.nll_loss(model(data), target, reduction=reduction)
                kl_d = sum(penalties(model, reduction=reduction))

                loss = n_ll + klw * kl_d
                loss.backward()

                optim.step()

                losses.append(float(loss))
                if verbose:
                    f_sparsity = sparsity(model, hard=True,
                                          threshold=threshold)
                else:
                    f_sparsity = float("nan")

                bar.set_postfix_str(
                    f"{f_sparsity:.1%} {float(n_ll):.3e} {float(kl_d):.3e}"
                )
            # end for
        # end for
    # end with
    return model.eval(), losses
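
A usage sketch for model_fit; `train_feed` is a hypothetical iterable of
(data, target) batches, e.g. a torch.utils.data.DataLoader, and the optimizer
is created by the caller:

    import torch

    optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    model, losses = model_fit(model, train_feed, optim, n_steps=10, klw=1e-3)
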
def model_test(X, y, model, threshold=1.0):
    import torch
    import torch.nn.functional as F

    model.eval()
    with torch.no_grad():
        mse = F.mse_loss(model(X), y)
        kl_d = sum(penalties(model))

    f_sparsity = sparsity(model, hard=True, threshold=threshold)
    # same "sparsity  loss  kl" format as the training progress bar
    print(f"{f_sparsity:.1%} {mse.item():.3e} {float(kl_d):.3e}")
    return model
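
And a matching usage sketch for model_test, with hypothetical held-out tensors
`X_test` and `y_test`:

    model = model_test(X_test, y_test, model, threshold=1.0)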