Example #1
0
        def J(theta):
            """Objective for the optimizer.

            Unpacks the flat parameter vector ``theta`` into per-layer
            weights, evaluates the regularized network loss, and returns
            the loss together with the gradient flattened back into a
            single vector.
            """
            layer_weights = pack_struct(theta, self.layer_units)
            loss, grad_struct = neural_net_loss(layer_weights, X, y, reg)
            return loss, flatten_struct(grad_struct)
Example #2
0
        def progress(x):

             nonlocal iter_feval, best_weights, best_val_acc
             iter_feval += 1

             # Loss history
             weights = pack_struct(x, self.layer_units)
             loss, grad = neural_net_loss(weights, X, y, reg)
             loss_history.append(loss)

             # Training accurary
             y_pred_train = neural_net_predict(weights, X)
             train_acc = np.mean(y_pred_train == y)
             train_acc_history.append(train_acc)

             # Validation accuracy
             y_pred_val= neural_net_predict(weights, X_val)
             val_acc = np.mean(y_pred_val == y_val)
             val_acc_history.append(val_acc)

             # Keep track of the best weights based on validation accuracy
             if val_acc > best_val_acc:
                 best_val_acc = val_acc
                 n_weights = len(weights)
                 best_weights = [{} for i in range(n_weights)]
                 for i in range(n_weights):
                     for p in weights[i]:
                         best_weights[i][p] = weights[i][p].copy()

             n_iters_verbose = max_iters / 20
             if iter_feval % n_iters_verbose == 0:
                 print("iter: {:4d}, loss: {:8f}, train_acc: {:4f}, val_acc: {:4f}".format(iter_feval, loss, train_acc, val_acc))
Example #3
0
        def progress(x):

            nonlocal iter_feval, best_weights, best_val_acc
            iter_feval += 1

            # Loss history
            weights = pack_struct(x, self.layer_units)
            loss, grad = mlp_loss(weights, X, y, reg)
            loss_history.append(loss)

            # Training accurary
            y_pred_train = mlp_predict(weights, X)
            train_acc = np.mean(y_pred_train == y)
            train_acc_history.append(train_acc)

            # Validation accuracy
            y_pred_val = mlp_predict(weights, X_val)
            val_acc = np.mean(y_pred_val == y_val)
            val_acc_history.append(val_acc)

            # Keep track of the best weights based on validation accuracy
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                n_weights = len(weights)
                best_weights = [{} for i in range(n_weights)]
                for i in range(n_weights):
                    for p in weights[i]:
                        best_weights[i][p] = weights[i][p].copy()

            n_iters_verbose = max_iters / 20
            if iter_feval % n_iters_verbose == 0:
                print(
                    "iter: {:4d}, loss: {:8f}, train_acc: {:4f}, val_acc: {:4f}"
                    .format(iter_feval, loss, train_acc, val_acc))
Example #4
0
        def J(theta):
            """Objective for the optimizer.

            Unpacks the flat parameter vector ``theta`` into per-layer
            weights, evaluates the regularized MLP loss, and returns the
            loss together with the gradient flattened back into a single
            vector.
            """
            layer_weights = pack_struct(theta, self.layer_units)
            loss, grad_struct = mlp_loss(layer_weights, X, y, reg)
            return loss, flatten_struct(grad_struct)
Example #5
0
        def J(theta):
            """Objective for the optimizer.

            Unpacks the flat parameter vector ``theta``, evaluates the
            sparse-autoencoder loss (with sparsity penalty ``beta`` and
            target activation ``sparsity_param``), and returns the loss
            together with the gradient flattened into a single vector.
            """
            layer_weights = pack_struct(theta, self.layer_units)
            loss, grad_struct = sparse_autoencoder_loss(
                layer_weights, X, reg,
                beta=beta, sparsity_param=sparsity_param)
            return loss, flatten_struct(grad_struct)
Example #6
0
def test_pack_struct():
    """flatten_struct(pack_struct(w, units)) must round-trip a flat vector."""
    layer_units = (4, 8, 4)

    # Total parameter count: one (fan_out x fan_in) weight matrix plus one
    # fan_out-sized bias vector per consecutive layer pair.
    total = sum(fan_out * fan_in + fan_out
                for fan_in, fan_out in zip(layer_units[:-1], layer_units[1:]))
    weights = np.random.randn(total)

    round_tripped = flatten_struct(pack_struct(weights, layer_units))
    assert_allclose(round_tripped, weights, atol=1e-8)
Example #7
0
def test_pack_struct():
    """flatten_struct(pack_struct(w, units)) must round-trip a flat vector."""
    layer_units = (4, 8, 4)

    # Total parameter count: one (fan_out x fan_in) weight matrix plus one
    # fan_out-sized bias vector per consecutive layer pair.
    total = sum(fan_out * fan_in + fan_out
                for fan_in, fan_out in zip(layer_units[:-1], layer_units[1:]))
    weights = np.random.randn(total)

    round_tripped = flatten_struct(pack_struct(weights, layer_units))
    assert_allclose(round_tripped, weights, atol=1e-8)
Example #8
0
 def pack_struct(self, data):
     """Pack the flat parameter vector *data* into this network's layer structure.

     Delegates to the module-level ``pack_struct`` with this instance's
     ``layer_units`` layout.
     """
     units = self.layer_units
     return pack_struct(data, units)
Example #9
0
 def pack_struct(self, data):
     """Pack the flat parameter vector *data* into this network's layer structure.

     Delegates to the module-level ``pack_struct`` with this instance's
     ``layer_units`` layout.
     """
     units = self.layer_units
     return pack_struct(data, units)