Example #1
def test_softmax_train_reshape_input(sample_train, sample_test):
    Xtrain, ytrain = sample_train(count=300)
    # Flatten each image to a row vector and append a bias column.
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], -1))
    Xtrain = np.hstack([Xtrain, np.ones((Xtrain.shape[0], 1))])

    softmax = Softmax()
    loss_hist = softmax.train(Xtrain, ytrain, reg=0, learning_rate=1e-6)
    # train() returns the loss history; training should reduce the loss.
    assert loss_hist[0] > loss_hist[-1]
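A note on the Softmax class used throughout these examples: its implementation is never shown. As a point of reference only, a minimal sketch of the vectorized cross-entropy loss such a CS231n-style linear classifier minimizes might look like this (the shapes and the regularization convention are assumptions, not taken from the snippets):

import numpy as np

def softmax_loss_sketch(W, X, y, reg):
    # W: (D, C) weights, X: (N, D) rows with a bias column, y: (N,) int labels.
    scores = X.dot(W)
    scores -= scores.max(axis=1, keepdims=True)  # shift for numeric stability
    probs = np.exp(scores)
    probs /= probs.sum(axis=1, keepdims=True)
    N = X.shape[0]
    loss = -np.log(probs[np.arange(N), y]).mean() + reg * np.sum(W * W)
    dscores = probs
    dscores[np.arange(N), y] -= 1
    grad = X.T.dot(dscores) / N + 2 * reg * W  # gradient of the loss w.r.t. W
    return loss, grad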
Example #2
def test_softmax_train(sample_train, sample_test):
    # This test verifies that train() rejects inputs with the wrong shape.
    Xtrain, ytrain = sample_train(count=40)
    Xtest, ytest = sample_test(count=10)

    with pytest.raises(ValueError):
        # Xtrain is still image-shaped (not flattened), so train() raises a ValueError.
        softmax = Softmax()
        softmax.train(Xtrain, ytrain)
Example #3
def test_softmax_train_1(sample_train, sample_test):
    # This test verifies that train() rejects inputs with the wrong shape.
    Xtrain, ytrain = sample_train(count=40)
    Xtest, ytest = sample_test(count=10)

    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], -1))
    Xtrain = np.hstack([Xtrain, np.ones((Xtrain.shape[0], 1))])

    with pytest.raises(ValueError):
        softmax = Softmax()
        # Passing Xtrain as the labels gives y the wrong shape, raising a ValueError.
        softmax.train(Xtrain, Xtrain)
Example #4
def test_softmax_train_2(sample_train, sample_test):
    # This test verifies that train() rejects inputs with the wrong shape.
    Xtrain, ytrain = sample_train(count=40)
    Xtest, ytest = sample_test(count=10)

    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], -1))
    Xtrain = np.hstack([Xtrain, np.ones((Xtrain.shape[0], 1))])

    with pytest.raises(ValueError):
        # This catches the ValueError caused by bad unpacking of a tuple.
        softmax = Softmax()
        softmax.train(ytrain, ytrain)
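Examples #2-#4 all assume that Softmax.train validates its inputs before running gradient descent. The checks below are a hypothetical sketch of what would make those three tests pass; the actual implementation is not shown in these snippets:

import numpy as np

def _validate_train_inputs(X, y):
    # X must already be flattened to (num_samples, num_features).
    if X.ndim != 2:
        raise ValueError('X must be 2-D; reshape image data first')
    # y must be a 1-D vector of integer class labels.
    if y.ndim != 1:
        raise ValueError('y must be a 1-D label vector')
    if X.shape[0] != y.shape[0]:
        raise ValueError('X and y must contain the same number of samples')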
Example #5
def main():
    X_train, y_train, X_val, y_val, X_test, y_test = gen_train_val_test(49000, 1000, 1000)
    #run_softmax_naive(X_train, y_train)
    softmax = Softmax()
    tic = time.time()
    softmax.train(X_train, y_train, learning_rate=2.782559e-06, reg=1e3,
                  num_iters=3000, batch_size=200, verbose=True)

    acc_train = evaluation(softmax, X_train, y_train)
    acc_val = evaluation(softmax, X_val, y_val)
    acc_test = evaluation(softmax, X_test, y_test)
    print('Train acc: {} Validation: {} Test: {}'.format(acc_train, acc_val, acc_test))
    toc = time.time()
    print('That took %fs' % (toc - tic))
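The evaluation helper used above is not defined in this snippet. Inferred from how it is called, a plausible (hypothetical) implementation is simply mean prediction accuracy:

def evaluation(model, X, y):
    # Fraction of samples whose predicted label matches the ground truth.
    return np.mean(model.predict(X) == y)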
Example #6
X2_test_feats = np.dot(X_test_feats,
                       best_net.params['W1']) + best_net.params['b1']

learning_rates = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0]
regularization_strengths = [0, 1e1]

results = {}
best_val = -1
best_softmax = None

################################################################################
# Softmax
################################################################################
for _l in learning_rates:
    for _r in regularization_strengths:
        softmax = Softmax()
        loss_hist = softmax.train(X2_train_feats,
                                  y_train,
                                  learning_rate=_l,
                                  reg=_r,
                                  num_iters=1500,
                                  verbose=False)

        y_train_pred = softmax.predict(X2_train_feats)
        train_accuracy = np.mean(y_train == y_train_pred)

        y_val_pred = softmax.predict(X2_val_feats)
        val_accuracy = np.mean(y_val == y_val_pred)
        print("train accuracy:{0}, val accuracy : {1}".format(
            train_accuracy, val_accuracy))
Example #7
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e8]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifier in best_softmax.                         #
################################################################################

for lr in learning_rates:
    for reg in regularization_strengths:
        soft = Softmax()
        soft.train(X_train, y_train, learning_rate=lr, reg=reg,
                   num_iters=1500, verbose=True)
        val_pred = soft.predict(X_val)
        val_acc = np.mean(val_pred == y_val)
        train_pred = soft.predict(X_train)
        train_acc = np.mean(train_pred == y_train)
        results[(lr, reg)] = [train_acc, val_acc]
        if val_acc > best_val:
            best_softmax = soft
            best_val = val_acc

################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# Print out results.
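The printing step is cut off in this excerpt. In the CS231n notebooks it usually iterates over the results dict along these lines (a sketch, assuming the (lr, reg) -> (train, val) layout used above):

for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)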
Example #8
results = {}
best_val = -1
best_softmax = None

# same grid as for the SVM
learning_rates = [1e-7, 5e-8]
regularization_strengths = [5e4, 5e5]
################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifier in best_softmax.                         #
################################################################################
import itertools
for lr, reg in itertools.product(learning_rates, regularization_strengths):
    sm = Softmax()
    sm.train(X_train, y_train, lr, reg, num_iters=1500)

    y_pred = sm.predict(X_train)
    train_accuracy = np.mean(y_pred == y_train)

    y_pred = sm.predict(X_val)
    val_accuracy = np.mean(y_pred == y_val)

    results[(lr, reg)] = (train_accuracy, val_accuracy)
    if val_accuracy > best_val:
        best_val = val_accuracy
        best_softmax = sm

################################################################################
#                              END OF YOUR CODE                                #
################################################################################
Example #9
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-8, 5e-7]
regularization_strengths = [5e-4, 1e-8]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifier in best_softmax.                         #
################################################################################
for lr_use in learning_rates:
    for reg_use in regularization_strengths:
        sfmx = Softmax()
        loss_hist = sfmx.train(X_dev, y_dev, learning_rate=lr_use, reg=reg_use,
                               num_iters=1500, verbose=True)
        y_train_pred = sfmx.predict(X_dev)
        y_val_pred = sfmx.predict(X_val)

        # The model was trained on the dev split, so score it against y_dev.
        acc_train = np.mean(y_dev == y_train_pred)
        acc_val = np.mean(y_val == y_val_pred)
        if acc_val > best_val:
            best_lr = lr_use
            best_reg = reg_use
            best_val = acc_val
            best_sfmx = sfmx
        results[(lr_use, reg_use)] = (acc_train, acc_val)
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
Example #10
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e8]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifier in best_softmax.                         #
################################################################################
print('-------------_*****--------------')
print('Tuning Parameters:')
for lr in np.arange(0.0000001, 0.0000005, 0.00000005):
    for reg in np.arange(5e4, 1e8, 20000):
        sm_iter = Softmax()
        sm_iter.train(X_train,
                      y_train,
                      learning_rate=lr,
                      reg=reg,
                      num_iters=3000,
                      verbose=True)
        y_val_pred_iter = sm_iter.predict(X_val)
        y_train_pred_iter = sm_iter.predict(X_train)
        val_acc = np.mean(y_val == y_val_pred_iter)
        train_acc = np.mean(y_train == y_train_pred_iter)
        results[(lr, reg)] = (train_acc, val_acc)  # (lr, reg) tuple -> accuracy pair
        print('Validation accuracy: %f' % val_acc)
        if val_acc > best_val:
            best_val = val_acc
            best_softmax = sm_iter
Example #11
loss_vectorized, grad_vectorized = softmax_loss_vectorized(
    W, X_dev, y_dev, 0.000005)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % grad_difference)
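Absolute loss and gradient differences can be hard to interpret across scales; a common complementary check is the maximum relative error (a sketch using the standard CS231n-style formula):

def rel_error(x, y):
    # Max relative error, with the denominator clamped to avoid division by zero.
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))

print('relative gradient error: %e' % rel_error(grad_naive, grad_vectorized))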

from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7, 1e-6, 5e-6, 1e-5, 5e-5]
regularization_strengths = [1e4, 2e4, 2.5e4, 3e4, 4e4, 5e4]

import itertools

for learn, regul in itertools.product(learning_rates,
                                      regularization_strengths):
    # Re-initialize the classifier for each setting so the runs are independent.
    softmax_model = Softmax()
    loss_hist = softmax_model.train(X_train,
                                    y_train,
                                    learning_rate=learn,
                                    reg=regul,
                                    num_iters=150,
                                    verbose=False)
    y_train_pred = softmax_model.predict(X_train)
    y_val_pred = softmax_model.predict(X_val)
    train_accur = np.mean(y_train == y_train_pred)
    val_accur = np.mean(y_val == y_val_pred)
Example #12
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e5]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifier in best_softmax.                         #
################################################################################

for lr in learning_rates:
    for reg in regularization_strengths:

        soft_max = Softmax()
        loss_hist = soft_max.train(X_train,
                                   y_train,
                                   learning_rate=lr,
                                   reg=reg,
                                   num_iters=700,
                                   verbose=True)
        y_train_pred = soft_max.predict(X_train)
        y_val_pred = soft_max.predict(X_val)
        y_train_acc = np.mean(y_train == y_train_pred)
        y_val_acc = np.mean(y_val == y_val_pred)
        results[(lr, reg)] = (y_train_acc, y_val_acc)
        if y_val_acc > best_val:
            best_val = y_val_acc
            best_softmax = soft_max
Example #13
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [2.5e4, 5e4]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifier in best_softmax.                         #
################################################################################
for lr in learning_rates:
    for r in regularization_strengths:
        print('lr = %e, reg = %e' % (lr, r))
        smax = Softmax()
        loss_hist = smax.train(X_train,
                               y_train,
                               learning_rate=lr,
                               reg=r,
                               num_iters=1000,
                               verbose=True)
        y_train_pred = smax.predict(X_train)
        training_accuracy = np.mean(y_train == y_train_pred)
        y_val_pred = smax.predict(X_val)
        validation_accuracy = np.mean(y_val == y_val_pred)
        results[(lr, r)] = (training_accuracy, validation_accuracy)
        if validation_accuracy > best_val:
            best_val = validation_accuracy
            best_softmax = smax
Example #14
# Train the model
X_train = np.reshape(X_train, (X_train.shape[0], -1))  # flatten each image to 1-D
X_train = np.hstack([X_train, np.ones([X_train.shape[0], 1])])
X_test = np.reshape(X_test, (X_test.shape[0], -1))  # flatten each image to 1-D
X_test = np.hstack([X_test, np.ones([X_test.shape[0], 1])])
num_class = 10
W = np.random.randn(X_train.shape[1], num_class) * 0.001
# Check the numerical gradient against the analytic gradient
from cs231n.classifiers import softmax_loss_naive, softmax_loss_vectorized
loss, grad = softmax_loss_naive(W, X_train, y_train, 0.5)
from cs231n.gradient_check import grad_check_sparse
f = lambda w: softmax_loss_vectorized(w, X_train, y_train, 0.5)[0]
grad_check_sparse(f, W, grad)
from cs231n.classifiers import Softmax
classifier = Softmax()
loss_hist = classifier.train(X_train,
                            y_train,
                            verbose=True,
                            num_iters=5000,
                            batch_size=100)
plt.plot(loss_hist)
plt.xlabel('Step')
plt.ylabel('Loss')
plt.show()
# Generalization (test) accuracy
y_pred = classifier.predict(X_test)
accuracy = np.mean(y_pred == y_test)
print("Test accuracy:", accuracy)

# from cs231n.classifiers import KNearestNeighbor, KNN_test, KNN_train