Example #1
def training_loss(weights, idx=0):
    """Negative log-likelihood of the training labels plus an L1 penalty."""
    # Assumes np (autograd.numpy), batch_indices, model_, X, y_ohe, eps,
    # and weights_flatten are all defined in the enclosing scope.
    t_idx_ = batch_indices(idx)
    preds = model_(weights, X[t_idx_, :])
    # eps is a small constant that keeps log() away from zero.
    loglik = -np.sum(np.log(preds + eps) * y_ohe[t_idx_, :])

    # Parameters arrive in groups of num_unpack per layer; only the first
    # two entries of each group (e.g. weights and biases) are penalized.
    num_unpack = 3
    reg = 0
    for idx_ in range(0, len(weights), num_unpack):
        param_temp_ = weights[idx_:idx_ + num_unpack]
        flattened, _ = weights_flatten(param_temp_[:2])
        reg += np.sum(np.abs(flattened)) * 1.0  # L1 strength of 1.0
    return loglik + reg
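
In the style of these snippets, which resemble autograd's neural-net examples, a loss like this is typically handed to an automatic-differentiation routine and the weights updated by gradient descent. The sketch below assumes the autograd library, that np above is autograd.numpy, and that training_loss and an initial weights list exist as in the example; the step size and step count are hypothetical.

from autograd import grad

# Gradient of the loss with respect to its first argument (the weights).
training_loss_grad = grad(training_loss)

step_size = 0.001   # hypothetical learning rate
for i in range(256):  # hypothetical number of SGD steps
    g = training_loss_grad(weights, idx=i)
    # autograd returns gradients in the same nested structure as weights.
    weights = [w - step_size * gw for w, gw in zip(weights, g)]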
Example #2
def training_loss(weights, idx=0):
    """Negative log-likelihood for binary labels plus an L1 penalty."""
    # Assumes np, batch_indices, model_, sigmoid, X, y, and weights_flatten
    # are defined in the enclosing scope; y holds 0/1 labels.
    t_idx_ = batch_indices(idx)
    preds = sigmoid(model_(weights, X[t_idx_, :]))
    # Probability assigned to the observed label: preds where y == 1,
    # (1 - preds) where y == 0.
    label_probabilities = preds * y[t_idx_] + (1 - preds) * (1 - y[t_idx_])
    loglik = -np.sum(np.log(label_probabilities))

    # Same grouped L1 penalty as in Example #1.
    num_unpack = 3
    reg = 0
    for idx_ in range(0, len(weights), num_unpack):
        param_temp_ = weights[idx_:idx_ + num_unpack]
        flattened, _ = weights_flatten(param_temp_[:2])
        reg += np.sum(np.abs(flattened)) * 1.0  # L1 strength of 1.0
    return loglik + reg
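
Example #2 relies on a sigmoid helper that the snippet does not define. A common definition, given here as an assumption rather than as the original code, uses the exact identity sigmoid(x) = 0.5 * (tanh(x / 2) + 1):

import autograd.numpy as np  # assuming autograd.numpy, as in the sketch above

def sigmoid(x):
    # Exact identity: 0.5 * (tanh(x / 2) + 1) equals 1 / (1 + exp(-x)).
    return 0.5 * (np.tanh(x / 2.0) + 1.0)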
Example #3
def l2_norm(params):
    """Computes the squared l2 norm of params by flattening them into a vector."""
    flattened, _ = weights_flatten(params)
    return np.dot(flattened, flattened)
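
All three examples call weights_flatten, whose definition is not shown. Its interface, returning a flat vector plus a second value that is discarded here, matches autograd's flatten utility, so a plausible stand-in under that assumption is:

from autograd.misc.flatten import flatten

def weights_flatten(params):
    # Assumed equivalent to autograd's flatten: returns (flat_1d_array,
    # unflatten_function) for an arbitrarily nested container of arrays.
    return flatten(params)

With that in place, l2_norm(weights) gives a squared-norm penalty that can be added to a loss the same way as the L1 terms above, e.g. loss + 0.01 * l2_norm(weights), where the strength 0.01 is hypothetical.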