Example #1
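All five snippets evidently come from one solver module and share some project helpers. A plausible preamble (the module names and exact helper semantics are assumptions, not confirmed by the source):

import copy

import numpy as np

import util                   # assumed helpers: accuracy(y, y_pred) and
                              # exp_running_avg(avg, x, decay) = decay * avg + (1 - decay) * x
import regularization as reg  # assumed helper: limit_norm(W, max_val)
import constant as c          # assumed constants: c.eps, e.g. 1e-8

# get_minibatch(X, y, size, shuffle=True) is assumed to split the data into
# a list of (X_mini, y_mini) pairs.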
def adam(nn,
         X_train,
         y_train,
         val_set=None,
         alpha=0.001,
         mb_size=256,
         n_iter=2000,
         print_after=100,
         max_norm=None):
    M = {k: np.zeros_like(v) for k, v in nn.model.items()}
    R = {k: np.zeros_like(v) for k, v in nn.model.items()}
    beta1 = .9
    beta2 = .999

    minibatches = get_minibatch(X_train, y_train, mb_size)

    if val_set:
        X_val, y_val = val_set

    for t in range(1, n_iter + 1):
        # pick a random minibatch (sampling with replacement)
        idx = np.random.randint(0, len(minibatches))
        X_mini, y_mini = minibatches[idx]

        grad, loss = nn.train_step(X_mini, y_mini)

        if t % print_after == 0:
            if val_set:
                val_acc = util.accuracy(y_val, nn.predict(X_val))
                print('Iter-{} loss: {:.4f} validation: {:.4f}'.format(
                    t, loss, val_acc))
            else:
                print('Iter-{} loss: {:.4f}'.format(t, loss))

        for k in grad:
            # exponential moving averages of the gradient and its square
            M[k] = util.exp_running_avg(M[k], grad[k], beta1)
            R[k] = util.exp_running_avg(R[k], grad[k]**2, beta2)

            # bias-corrected moment estimates
            m_k_hat = M[k] / (1. - beta1**t)
            r_k_hat = R[k] / (1. - beta2**t)

            nn.model[k] -= alpha * m_k_hat / (np.sqrt(r_k_hat) + c.eps)
            if max_norm is not None:
                nn.model[k] = reg.limit_norm(nn.model[k], max_val=max_norm)

    return nn
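For intuition, the inner loop above is Adam's bias-corrected update (assuming the exp_running_avg semantics noted in the preamble). A minimal self-contained sketch of the same rule on a 1-D quadratic; every name here is illustrative, not part of the source:

import numpy as np

def toy_adam(grad_fn, w, alpha=0.001, beta1=0.9, beta2=0.999, eps=1e-8, n_iter=5000):
    m, r = 0.0, 0.0
    for t in range(1, n_iter + 1):
        g = grad_fn(w)
        m = beta1 * m + (1 - beta1) * g        # first moment (mean of gradients)
        r = beta2 * r + (1 - beta2) * g**2     # second moment (uncentered variance)
        m_hat = m / (1 - beta1**t)             # correct the zero-initialization bias
        r_hat = r / (1 - beta2**t)
        w -= alpha * m_hat / (np.sqrt(r_hat) + eps)
    return w

print(toy_adam(lambda w: 2 * (w - 3), w=0.0))  # approaches the minimum at w = 3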
Example #2
def nesterov(nn,
             X_train,
             y_train,
             val_set=None,
             alpha=1e-3,
             mb_size=256,
             n_iter=2000,
             print_after=100,
             max_norm=None):
    velocity = {k: np.zeros_like(v) for k, v in nn.model.items()}
    gamma = .9

    minibatches = get_minibatch(X_train, y_train, mb_size)

    if val_set:
        X_val, y_val = val_set

    for it in range(1, n_iter + 1):
        idx = np.random.randint(0, len(minibatches))
        X_mini, y_mini = minibatches[idx]

        # evaluate the gradient at the look-ahead point theta - gamma * velocity
        # (the parameter update below is theta -= velocity)
        nn_ahead = copy.deepcopy(nn)
        nn_ahead.model.update(
            {k: v - gamma * velocity[k]
             for k, v in nn.model.items()})
        grad, loss = nn_ahead.train_step(X_mini, y_mini)

        if it % print_after == 0:
            if val_set:
                val_acc = util.accuracy(y_val, nn.predict(X_val))
                print('Iter-{} loss: {:.4f} validation: {:.4f}'.format(
                    it, loss, val_acc))
            else:
                print('Iter-{} loss: {:.4f}'.format(it, loss))

        for layer in grad:
            velocity[layer] = gamma * velocity[layer] + alpha * grad[layer]
            nn.model[layer] -= velocity[layer]
            if max_norm is not None:
                nn.model[layer] = reg.limit_norm(nn.model[layer],
                                                 max_val=max_norm)

    return nn
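The deepcopy builds the look-ahead model where the gradient is evaluated; that is the only difference from plain momentum. A scalar sketch of the same scheme (all names illustrative):

def toy_nesterov(grad_fn, w, alpha=1e-3, gamma=0.9, n_iter=2000):
    v = 0.0
    for _ in range(n_iter):
        g = grad_fn(w - gamma * v)   # gradient at the look-ahead point
        v = gamma * v + alpha * g    # accumulate velocity
        w -= v                       # step against the velocity
    return w

print(toy_nesterov(lambda w: 2 * (w - 3), w=0.0))  # approaches 3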
Example #3
def sgd(nn,
        X_train,
        y_train,
        val_set=None,
        alpha=1e-3,
        mb_size=256,
        n_iter=2000,
        print_after=100,
        max_norm=None):
    minibatches = get_minibatch(X_train, y_train, mb_size)

    if val_set:
        X_val, y_val = val_set

    for it in range(1, n_iter + 1):
        idx = np.random.randint(0, len(minibatches))
        X_mini, y_mini = minibatches[idx]

        grad, loss = nn.train_step(X_mini, y_mini)

        # Periodically report training loss (and validation accuracy)
        if it % print_after == 0:
            if val_set:
                val_acc = util.accuracy(y_val, nn.predict(X_val))
                print('Iter-{} loss: {:.4f} validation: {:.4f}'.format(
                    it, loss, val_acc))
            else:
                print('Iter-{} loss: {:.4f}'.format(it, loss))

        for layer in grad:
            nn.model[layer] -= alpha * grad[layer]
            if max_norm is not None:
                nn.model[layer] = reg.limit_norm(nn.model[layer],
                                                 max_val=max_norm)

    return nn
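All five solvers share the same calling convention. A hypothetical invocation, assuming a network object that exposes .model as a dict of arrays, .train_step(X, y) -> (grads, loss), and .predict(X) (the variable names are illustrative):

nn = sgd(nn, X_train, y_train,
         val_set=(X_val, y_val),
         alpha=1e-3, mb_size=256, n_iter=2000, print_after=100)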
Example #4
def rmsprop(nn,
            X_train,
            y_train,
            val_set=None,
            alpha=1e-3,
            mb_size=256,
            n_iter=2000,
            print_after=100,
            max_norm=None):
    cache = {k: np.zeros_like(v) for k, v in nn.model.items()}
    gamma = .9

    minibatches = get_minibatch(X_train, y_train, mb_size)

    if val_set:
        X_val, y_val = val_set

    for it in range(1, n_iter + 1):
        idx = np.random.randint(0, len(minibatches))
        X_mini, y_mini = minibatches[idx]

        grad, loss = nn.train_step(X_mini, y_mini)

        if it % print_after == 0:
            if val_set:
                val_acc = util.accuracy(y_val, nn.predict(X_val))
                print('Iter-{} loss: {:.4f} validation: {:.4f}'.format(
                    it, loss, val_acc))
            else:
                print('Iter-{} loss: {:.4f}'.format(it, loss))

        for k in grad:
            # scale the step by a running RMS of recent gradients
            cache[k] = util.exp_running_avg(cache[k], grad[k]**2, gamma)
            nn.model[k] -= alpha * grad[k] / (np.sqrt(cache[k]) + c.eps)
            if max_norm is not None:
                nn.model[k] = reg.limit_norm(nn.model[k], max_val=max_norm)

    return nn
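RMSprop divides each step by a running root-mean-square of recent gradients, so the effective step size stays roughly alpha regardless of gradient scale. A scalar sketch (all names illustrative):

import numpy as np

def toy_rmsprop(grad_fn, w, alpha=1e-2, gamma=0.9, eps=1e-8, n_iter=2000):
    cache = 0.0
    for _ in range(n_iter):
        g = grad_fn(w)
        cache = gamma * cache + (1 - gamma) * g**2   # running mean of squared grads
        w -= alpha * g / (np.sqrt(cache) + eps)      # roughly alpha-sized steps
    return w

print(toy_rmsprop(lambda w: 2 * (w - 3), w=0.0))  # hovers near the minimum at 3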
Example #5
def adam_rnn(nn,
             X_train,
             y_train,
             alpha=0.001,
             mb_size=256,
             n_iter=2000,
             print_after=100,
             max_norm=None):
    M = {k: np.zeros_like(v) for k, v in nn.model.items()}
    R = {k: np.zeros_like(v) for k, v in nn.model.items()}
    beta1 = .9
    beta2 = .999

    # keep sequence order so the hidden state can be carried across minibatches
    minibatches = get_minibatch(X_train, y_train, mb_size, shuffle=False)

    idx = 0
    state = nn.initial_state()
    # start at the loss of a uniform guess over the vocabulary
    smooth_loss = -np.log(1.0 / len(set(X_train)))

    for t in range(1, n_iter + 1):

        # wrap around at the end of the data and reset the hidden state
        if idx >= len(minibatches):
            idx = 0
            state = nn.initial_state()

        X_mini, y_mini = minibatches[idx]
        idx += 1

        if t % print_after == 0:
            sep = '=' * 72
            print(sep)
            print('Iter-{} loss: {:.4f}'.format(t, smooth_loss))
            print(sep)

            # sample from the model to inspect generation quality
            sample = nn.sample(X_mini[0], state, 100)
            print(sample)

            print(sep)
            print()
            print()

        grad, loss, state = nn.train_step(X_mini, y_mini, state)
        smooth_loss = 0.999 * smooth_loss + 0.001 * loss  # slow EMA of the loss

        for k in grad:
            M[k] = util.exp_running_avg(M[k], grad[k], beta1)
            R[k] = util.exp_running_avg(R[k], grad[k]**2, beta2)

            # bias-corrected moment estimates
            m_k_hat = M[k] / (1. - beta1**t)
            r_k_hat = R[k] / (1. - beta2**t)

            nn.model[k] -= alpha * m_k_hat / (np.sqrt(r_k_hat) + c.eps)
            if max_norm is not None:
                nn.model[k] = reg.limit_norm(nn.model[k], max_val=max_norm)

    return nn
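Unlike the feed-forward solvers, adam_rnn walks the minibatches in order and threads the hidden state through train_step, resetting it once per pass over the data. The reported number is an exponential moving average of the loss, seeded at the cross-entropy of a uniform guess over the vocabulary. A tiny demonstration of that seeding and smoothing (vocab_size and the loss values are made up for illustration):

import numpy as np

vocab_size = 65                             # e.g. a small character set
smooth = -np.log(1.0 / vocab_size)          # about 4.17, loss of a uniform guess
for loss in (4.0, 3.5, 3.0):                # pretend per-step training losses
    smooth = 0.999 * smooth + 0.001 * loss  # same EMA as in adam_rnn
print(round(smooth, 4))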