Example #1
def test_softmax_train_reshape_input(sample_train, sample_test):
    Xtrain, ytrain = sample_train(count=300)
    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], -1))
    Xtrain = np.hstack([Xtrain, np.ones((Xtrain.shape[0], 1))])

    softmax = Softmax()
    loss = softmax.train(Xtrain, ytrain, reg=0, learning_rate=1e-6)
    assert loss[0] > loss[-1]
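The assertion above relies on `train` returning the per-iteration loss history. A minimal sketch of that contract, assuming the standard cs231n LinearClassifier-style API (class name, defaults, and internals here are assumptions, not taken from the tests):

import numpy as np

class SoftmaxSketch(object):
    """Hedged stand-in for the Softmax classifier the tests exercise."""

    def __init__(self):
        self.W = None

    def train(self, X, y, learning_rate=1e-6, reg=0.0,
              num_iters=100, batch_size=200, verbose=False):
        num_train, dim = X.shape          # raises ValueError if X is not 2-D
        num_classes = int(np.max(y)) + 1
        if self.W is None:
            self.W = 0.001 * np.random.randn(dim, num_classes)
        loss_history = []
        for it in range(num_iters):
            idx = np.random.choice(num_train, min(batch_size, num_train))
            Xb, yb = X[idx], y[idx]
            scores = Xb.dot(self.W)
            scores -= scores.max(axis=1, keepdims=True)   # numeric stability
            probs = np.exp(scores)
            probs /= probs.sum(axis=1, keepdims=True)
            loss = (-np.log(probs[np.arange(len(yb)), yb]).mean()
                    + reg * np.sum(self.W ** 2))
            dscores = probs
            dscores[np.arange(len(yb)), yb] -= 1
            grad = Xb.T.dot(dscores) / len(yb) + 2 * reg * self.W
            self.W -= learning_rate * grad                # vanilla SGD step
            loss_history.append(loss)
        return loss_history   # `loss[0] > loss[-1]` inspects this list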
Example #2
def test_softmax_train(sample_train, sample_test):
    # this test verifies that train validates the shape of X
    Xtrain, ytrain = sample_train(count=40)
    Xtest, ytest   = sample_test(count=10)

    with pytest.raises(ValueError):
        # Xtrain still has its raw image shape here, so train raises a ValueError
        softmax = Softmax()
        softmax.train(Xtrain, ytrain)
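The ValueError these shape tests expect can come either from explicit validation inside `train` or simply from unpacking `X.shape`. A minimal sketch of an explicit guard (function name and messages are assumptions, not from the test suite):

import numpy as np

def _check_shapes(X, y):
    # reject inputs that are not an (N, D) matrix with matching (N,) labels
    if X.ndim != 2:
        raise ValueError('expected X of shape (N, D), got %s' % (X.shape,))
    if y.ndim != 1 or y.shape[0] != X.shape[0]:
        raise ValueError('expected y of shape (N,), got %s' % (y.shape,))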
Example #3
def test_softmax_train_1(sample_train, sample_test):
    # this test verifies that train validates the shape of y (the labels)
    Xtrain, ytrain = sample_train(count=40)
    Xtest, ytest   = sample_test(count=10)

    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], -1))
    Xtrain = np.hstack([Xtrain, np.ones((Xtrain.shape[0], 1))])

    with pytest.raises(ValueError):
        softmax = Softmax()
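        # passing Xtrain as the labels gives y a 2-D shape, so train raises ValueError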
        softmax.train(Xtrain, Xtrain)
Example #4
def test_softmax_train_2(sample_train, sample_test):
    # this test verifies that train validates the shape of X
    Xtrain, ytrain = sample_train(count=40)
    Xtest, ytest   = sample_test(count=10)

    Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], -1))
    Xtrain = np.hstack([Xtrain, np.ones((Xtrain.shape[0], 1))])

    with pytest.raises(ValueError):
        # this catches the ValueError raised when train unpacks the shape of the 1-D input
        softmax = Softmax()
        softmax.train(ytrain, ytrain)
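For the test above, passing the 1-D `ytrain` as `X` makes `X.shape` a 1-tuple, so a line like `num_train, dim = X.shape` inside `train` fails to unpack. A minimal repro of that ValueError:

import numpy as np

y = np.zeros(40, dtype=np.int64)   # stands in for ytrain
try:
    num_train, dim = y.shape       # a 1-tuple cannot unpack into two names
except ValueError as e:
    print('ValueError:', e)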
Example #5
def main():
    X_train, y_train, X_val, y_val, X_test, y_test = gen_train_val_test(49000, 1000, 1000)
    #run_softmax_naive(X_train, y_train)
    softmax = Softmax()
    tic = time.time()
    softmax.train(X_train, y_train, learning_rate=2.782559e-06, reg=1e3,
                  num_iters=3000, batch_size=200, verbose=True)

    acc_train = evaluation(softmax, X_train, y_train)
    acc_val = evaluation(softmax, X_val, y_val)
    acc_test = evaluation(softmax, X_test, y_test)
    print('Train acc: {} Validation: {} Test: {}'.format(acc_train, acc_val, acc_test))
    toc = time.time()
    print('That took %fs' % (toc - tic))
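`evaluation` is not defined in this snippet; a plausible helper consistent with how it is called (name and return value are assumptions):

import numpy as np

def evaluation(model, X, y):
    # fraction of examples the trained classifier labels correctly
    return np.mean(model.predict(X) == y)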
Example #6
import h5py
import numpy as np
from numpy import loadtxt
from cs231n.classifiers import Softmax
from cs231n.classifiers.softmax import softmax_loss_vectorized
h5f = h5py.File('img_data.h5','r')
X = h5f['dataset_1'][:]
h5f.close()
y = loadtxt("y_labels.txt", dtype=np.uint8, delimiter="\n", unpack=False)

#X_train = np.zeros((27116,196608))
#y_train = np.zeros(27116)
#X_val = np.zeros((5000,196608))
#y_val = np.zeros(5000)

X_train = X[8000:35117,:]
y_train = y[8000:35117]
X_val=X[3000:8000,:]
y_val=y[3000:8000]
# Generate a random softmax weight matrix and use it to compute the loss.
W = np.random.randn(196608, 5) * 0.0001
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_train, y_train, 0.00001)
softmax=Softmax()
loss_hist = softmax.train(X_train, y_train, learning_rate=1e-7, reg=5e4,
                          num_iters=1500, verbose=False)
y_train_pred = softmax.predict(X_train)
training_accuracy = np.mean(y_train == y_train_pred)
y_val_pred = softmax.predict(X_val)
val_accuracy = np.mean(y_val == y_val_pred)
print('training accuracy: %f' % training_accuracy)
print('validation accuracy: %f' % val_accuracy)
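Several snippets call `softmax_loss_vectorized` without showing it. A minimal vectorized sketch under the usual cs231n contract `(W, X, y, reg) -> (loss, dW)`; the regularization convention here (`reg * sum(W**2)` with gradient `2 * reg * W`) is an assumption, since some versions of the assignment use a 0.5 factor. With `W` drawn as above (`0.0001 * randn`), the initial loss should be close to `log(5) ≈ 1.609` for the 5 classes here.

import numpy as np

def softmax_loss_vectorized_sketch(W, X, y, reg):
    num_train = X.shape[0]
    scores = X.dot(W)
    scores -= scores.max(axis=1, keepdims=True)     # shift for numeric stability
    exp_scores = np.exp(scores)
    probs = exp_scores / exp_scores.sum(axis=1, keepdims=True)
    loss = -np.log(probs[np.arange(num_train), y]).mean() + reg * np.sum(W * W)
    dscores = probs                                 # gradient of loss w.r.t. scores
    dscores[np.arange(num_train), y] -= 1
    dW = X.T.dot(dscores) / num_train + 2 * reg * W
    return loss, dW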
Example #7
results = {}
best_val = -1
best_softmax = None

# same from svm
learning_rates = [1e-7, 5e-8]
regularization_strengths = [5e4, 5e5]
################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
import itertools
for lr, reg in itertools.product(learning_rates, regularization_strengths):
    sm = Softmax()
    sm.train(X_train, y_train, lr, reg, num_iters=1500)

    y_pred = sm.predict(X_train)
    train_accuracy = np.mean(y_pred == y_train)

    y_pred = sm.predict(X_val)
    val_accuracy = np.mean(y_pred == y_val)

    results[(lr, reg)] = (train_accuracy, val_accuracy)
    if val_accuracy > best_val:
        best_val = val_accuracy
        best_softmax = sm

################################################################################
#                              END OF YOUR CODE                                #
################################################################################
Example #8
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of over 0.35 on the validation set.
from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [1.3e-7]
regularization_strengths = [5e4]

for lr in learning_rates:
    for reg in regularization_strengths:
        softmax = Softmax()
        softmax.train(X_train, y_train, lr, reg,
                      num_iters=500, verbose=True) 
        y_train_pred = softmax.predict(X_train) 
        y_train_acc = np.mean(y_train == y_train_pred)
        y_val_pred = softmax.predict(X_val) 
        y_val_acc = np.mean(y_val == y_val_pred)
        
        results[(lr, reg)] = (y_train_acc, y_val_acc)
        if y_val_acc > best_val:
            best_val = y_val_acc
            best_softmax = softmax

# Print out results.
for lr, reg in sorted(results):
    train_acc, val_acc = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (lr, reg, train_acc, val_acc))
Example #9
File: softmax.py Project: walter504/cs231n
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e8]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################

for lr in np.linspace(learning_rates[0], learning_rates[1], 5):
    for rs in np.linspace(regularization_strengths[0],
                          regularization_strengths[1], 5):

        softmax = Softmax()
        loss_hist = softmax.train(X_train,
                                  y_train,
                                  learning_rate=lr,
                                  reg=rs,
                                  num_iters=1500,
                                  verbose=True)
        train_accuracy = np.mean(y_train == softmax.predict(X_train))
        val_accuracy = np.mean(y_val == softmax.predict(X_val))
        if best_val < val_accuracy:
            best_val = val_accuracy
            best_softmax = softmax

        results[(lr, rs)] = train_accuracy, val_accuracy

################################################################################
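A side note on the sweep above: `np.linspace` between 5e4 and 1e8 puts almost every sample at the large end in relative terms; regularization strengths are usually swept in log-space instead. A sketch of that alternative (same loop body as above):

import numpy as np

for lr in np.logspace(np.log10(1e-7), np.log10(5e-7), 5):
    for rs in np.logspace(np.log10(5e4), np.log10(1e8), 5):
        ...  # train and evaluate exactly as in the loop above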
Example #10
loss_vectorized, grad_vectorized = softmax_loss_vectorized(
    W, X_dev, y_dev, 0.000005)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % grad_difference)

from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7, 1e-6, 5e-6, 1e-5, 5e-5]
regularization_strengths = [1e4, 2e4, 2.5e4, 3e4, 4e4, 5e4]

softmax_model = Softmax()
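# note: this single Softmax instance is reused for every (lr, reg) pair below,
# so each run warm-starts from the previous run's weights (the usual cs231n
# train() only initializes W when it is None) rather than training from scratch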

import itertools

for learn, regul in itertools.product(learning_rates,
                                      regularization_strengths):
    loss_hist = softmax_model.train(X_train,
                                    y_train,
                                    learning_rate=learn,
                                    reg=regul,
                                    num_iters=150,
                                    verbose=False)
    y_train_pred = softmax_model.predict(X_train)
    y_val_pred = softmax_model.predict(X_val)
    train_accur = np.mean(y_train == y_train_pred)
    val_accur = np.mean(y_val == y_val_pred)
Example #11
# Use the validation set to tune the hyperparameters (regularization strength and learning rate)
from cs231n.classifiers import Softmax

results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 1.5e-7, 0.75e-7, 1.25e-7]
reg_strengths = [2.5e4, 3e4, 3.25e4, 3.5e4, 4e4, 4.5e4, 5e4]
# learning_rates = np.logspace(-10, 10, 10)
# reg_strengths = np.logspace(-3, 6, 10)

# tune on the validation set
for lr in learning_rates:
    for rs in reg_strengths:
        softmax = Softmax()
        softmax.train(X_train,
                      y_train,
                      learning_rate=lr,
                      reg=rs,
                      num_iters=1000)

        # evaluation must happen inside the loop, once per trained model
        y_train_pred = softmax.predict(X_train)
        acc_train = np.mean(y_train == y_train_pred)
        y_val_pred = softmax.predict(X_val)
        acc_val = np.mean(y_val == y_val_pred)

        results[(lr, rs)] = (acc_train, acc_val)

        if best_val < acc_val:
            best_val = acc_val
            best_softmax = softmax
Example #12
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 2e-7, 5e-7]
#regularization_strengths = [5e4, 1e8]
regularization_strengths = [(1 + 0.1 * i) * 1e4
                            for i in range(-3, 4)] + [(5 + 0.1 * i) * 1e4
                                                      for i in range(-3, 4)]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
for lr in learning_rates:
    for rs in regularization_strengths:
        softmax = Softmax()
        softmax.train(X_train, y_train, lr, rs, num_iters=2000)
        y_train_pred = softmax.predict(X_train)
        train_accuracy = np.mean(y_train == y_train_pred)
        y_val_pred = softmax.predict(X_val)
        val_accuracy = np.mean(y_val == y_val_pred)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_softmax = softmax
        results[(lr, rs)] = train_accuracy, val_accuracy
################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# Print out results.
Example #13
from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [3e-7, 1e-6, 3e-6]
regularization_strengths = [1e3, 3e3, 1e4]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
for lr in learning_rates:
    for reg in regularization_strengths:
        softmax = Softmax()
        softmax.train(X_train,
                      y_train,
                      learning_rate=lr,
                      reg=reg,
                      num_iters=1501,
                      verbose=False)

        y_train_pred = softmax.predict(X_train)
        training_accuracy = np.mean(y_train == y_train_pred)
        y_val_pred = softmax.predict(X_val)
        validation_accuracy = np.mean(y_val == y_val_pred)
        results[(lr, reg)] = (training_accuracy, validation_accuracy)

        if best_val < validation_accuracy:
            best_val = validation_accuracy
            best_softmax = softmax
Example #14
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [2.5e4, 5e4]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
params = [(x, y) for x in learning_rates for y in regularization_strengths]
for lrate, regular in params:
    softmax = Softmax()
    loss_hist = softmax.train(X_train,
                              y_train,
                              learning_rate=lrate,
                              reg=regular,
                              num_iters=700,
                              verbose=True)
    y_train_pred = softmax.predict(X_train)
    accuracy_train = np.mean(y_train == y_train_pred)
    y_val_pred = softmax.predict(X_val)
    accuracy_val = np.mean(y_val == y_val_pred)
    results[(lrate, regular)] = (accuracy_train, accuracy_val)
    if best_val < accuracy_val:
        best_val = accuracy_val
        best_softmax = softmax
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
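The `params` list comprehension above builds the same cross product the other snippets build with nested loops; `itertools.product` is an equivalent one-liner:

import itertools
params = list(itertools.product(learning_rates, regularization_strengths))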
Example #15
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e8]

################################################################################
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
num_iterations = 200
for lr in learning_rates:
    for reg in regularization_strengths:
        softmax = Softmax()
        softmax.train(X_train,
                      y_train,
                      learning_rate=lr,
                      reg=reg,
                      num_iters=num_iterations)
        y_train_pred = softmax.predict(X_train)
        y_val_pred = softmax.predict(X_val)
        train_accuracy = np.mean(y_train == y_train_pred)
        valid_accuracy = np.mean(y_val == y_val_pred)
        results[(lr, reg)] = (train_accuracy, valid_accuracy)
        if valid_accuracy > best_val:
            best_val = valid_accuracy
            best_softmax = softmax
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
Example #16
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e8]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
print('-------------_*****--------------')
print('Tuning Parameters:')
for lr in np.arange(0.0000001, 0.0000005, 0.00000005):
    for reg in np.arange(5e4, 1e8, 20000):
        sm_iter = Softmax()
        sm_iter.train(X_train,
                      y_train,
                      learning_rate=lr,
                      reg=reg,
                      num_iters=3000,
                      verbose=True)
        y_val_pred_iter = sm_iter.predict(X_val)
        y_train_pred_iter = sm_iter.predict(X_train)
        val_acc = np.mean(y_val == y_val_pred_iter)
        train_acc = np.mean(y_train == y_train_pred_iter)
        results[(lr, reg)] = (train_acc, val_acc)  # map the (lr, reg) tuple to its accuracies
        print('Validation accuracy: %f' % val_acc)
        if (val_acc > best_val):
            best_val = val_acc
            best_softmax = sm_iter
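For scale, the `np.arange` grid above is enormous; a quick side calculation (not part of the original snippet):

import numpy as np

n_lr = len(np.arange(0.0000001, 0.0000005, 0.00000005))  # about 8 learning rates
n_reg = len(np.arange(5e4, 1e8, 20000))                  # about 5,000 reg values
print(n_lr * n_reg, 'full 3000-iteration training runs')  # roughly 40,000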
Example #17
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [2.5e4, 5e4]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

num_learning = len(learning_rates)
num_regularization = len(regularization_strengths)
for i in range(num_learning):
    for j in range(num_regularization):
        softmax = Softmax()
        loss_hist = softmax.train(X_train, y_train,
                                  learning_rate=learning_rates[i],
                                  reg=regularization_strengths[j],
                                  num_iters=1500, verbose=False)
        y_train_pred = softmax.predict(X_train)
        accuracy_train = np.mean(y_train == y_train_pred)
        y_val_pred = softmax.predict(X_val)
        accuracy_val = np.mean(y_val == y_val_pred)

        results[(learning_rates[i], regularization_strengths[j])] = (accuracy_train, accuracy_val)

        if accuracy_val > best_val:
            best_val = accuracy_val
            best_softmax = softmax

Example #18
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [2.5e4, 5e4]
tic = time.time()
################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
for ilr in np.arange(learning_rates[0], learning_rates[1],
                     0.05 * (learning_rates[1] - learning_rates[0])):
    for ireg in np.arange(regularization_strengths[0],
                          regularization_strengths[1],
                          0.05 * (regularization_strengths[1] - regularization_strengths[0])):
        sm = Softmax()
        _ = sm.train(X_train,
                     y_train,
                     learning_rate=ilr,
                     reg=ireg,
                     num_iters=1500,
                     verbose=True)
        y_train_pred = sm.predict(X_train)
        train_acc = np.mean(y_train == y_train_pred)
        print('training accuracy: %f' % (train_acc))
        y_val_pred = sm.predict(X_val)
        val_acc = np.mean(y_val == y_val_pred)
        if val_acc > best_val:
            best_val = val_acc
            best_softmax = sm
        print('validation accuracy: %f' % (val_acc))
Example #19
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7]
regularization_strengths = [6e4,4e4]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
for lerRate in learning_rates:
    for regStrength in regularization_strengths:
        
        softmax = Softmax()
        tic = time.time()
        loss_hist = softmax.train(X_train, y_train, learning_rate=lerRate,
                                  reg=regStrength, num_iters=1500, verbose=True)
        toc = time.time()
        print('That took %fs' % (toc - tic))
        plt.plot(loss_hist)
        plt.xlabel('Iteration number')
        plt.ylabel('Loss value')
        plt.show()
        y_train_pred = softmax.predict(X_train)
        trainingAccu = np.mean(y_train == y_train_pred)
        print('training accuracy: %f' % trainingAccu)
        y_val_pred = softmax.predict(X_val)
        validationAccu = np.mean(y_val == y_val_pred)
        print('validation accuracy: %f' % validationAccu)
Example #20
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [2.5e4, 5e4]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################


from copy import deepcopy

for lr in learning_rates:
    for reg in regularization_strengths:
        softmax = Softmax()
        train_loss = softmax.train(X_train, y_train, learning_rate=lr, reg=reg,
                                   num_iters=3000, batch_size=200, verbose=False)
        y_train_pred = softmax.predict(X_train)
        train_accuracy = np.mean(y_train == y_train_pred)
        y_val_pred = softmax.predict(X_val)
        val_accuracy = np.mean(y_val == y_val_pred)
        results[(lr, reg)] = (train_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_softmax = deepcopy(softmax)


################################################################################
#                              END OF YOUR CODE                                #
################################################################################
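Design note on the snippet above: `deepcopy` protects the saved best model from later mutation. Because each iteration constructs a fresh `Softmax()`, simply keeping the reference (as the other examples do) would behave identically here.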
Example #21
from cs231n.classifiers.softmax import softmax_loss_vectorized
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))

# As we did for the SVM, we use the Frobenius norm to compare the two versions
# of the gradient.
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % grad_difference)

###################################################################################
from cs231n.classifiers import Softmax, LinearSVM
softmax = Softmax()
tic = time.time()
loss_hist = softmax.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
                          num_iters=1500, verbose=True)
toc = time.time()
print('That took %fs' % (toc - tic))

plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()

# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
Example #22
# get a classification accuracy of over 0.35 on the validation set.
from cs231n.classifiers import Softmax
learning_rates = [1e-7, 5e-7, 1e-6, 5e-6]
regularization_strengths = [1e3, 5e3, 1e4, 5e4]

# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1  # The highest validation accuracy that we have seen so far.
best_softmax = None  # The Softmax object that achieved the highest validation rate.

for lr in learning_rates:
    for reg in regularization_strengths:
        sm = Softmax()
        loss_hist = sm.train(X_train,
                             y_train,
                             learning_rate=lr,
                             reg=reg,
                             num_iters=1500,
                             verbose=False)
        y_train_pred = sm.predict(X_train)
        tr_acc = np.mean(y_train == y_train_pred)
        #print('training accuracy: %f' % (tr_acc,))
        y_val_pred = sm.predict(X_val)
        val_acc = np.mean(y_val == y_val_pred)
        #print('validation accuracy: %f' % (val_acc,))
        results[(lr, reg)] = (tr_acc, val_acc)
        if val_acc > best_val:
            best_val = val_acc
            best_softmax = sm
Example #23
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
# Your code
################################################################################
#                              END OF YOUR CODE                                #
################################################################################
results = {}
best_val = -1
best_softmax = None
lr_sample = np.linspace(learning_rates[0], learning_rates[1], 1)  # num=1: only the first value is sampled
reg_sample = np.linspace(regularization_strengths[0], regularization_strengths[1], 1)

tic = time.time()

for s1 in lr_sample:
    for s2 in reg_sample:
        soft = Softmax()
        # train a model for the current hyperparameter pair
        soft.train(X_train, y_train, learning_rate=s1, reg=s2, num_iters=1500, verbose=False)
        # accuracy of the trained model on the training set
        y_train_predicted = soft.predict(X_train)
        train_accuracy = np.mean(y_train_predicted == y_train)
        # accuracy of the trained model on the held-out validation set
        y_val_predicted = soft.predict(X_val)
        val_accuracy = np.mean(y_val_predicted == y_val)

        results[(s1, s2)] = (train_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_softmax = soft
toc = time.time()
Example #24
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-8, 5e-7]
regularization_strengths = [5e-4, 1e-8]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
for lr_use in learning_rates:
    for reg_use in regularization_strengths:
        sfmx = Softmax()
        loss_hist = sfmx.train(X_dev, y_dev, learning_rate=lr_use, reg=reg_use,
                               num_iters=1500, verbose=True)
        y_train_pred = sfmx.predict(X_dev)
        y_val_pred = sfmx.predict(X_val)

        acc_train = np.mean(y_dev == y_train_pred)  # predictions are on X_dev, so compare against y_dev
        acc_val = np.mean(y_val == y_val_pred)
        if acc_val > best_val:
            best_lr = lr_use
            best_reg = reg_use
            best_val = acc_val
            best_sfmx = sfmx
        results_once = {(lr_use, reg_use): (acc_train, acc_val)}
        results.update(results_once)
################################################################################
Example #25
File: sofe.py Project: eejackliu/my1
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e8]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################

for lr in learning_rates:
    for reg in regularization_strengths:
        soft = Softmax()
        soft.train(X_train, y_train, learning_rate=lr, reg=reg, num_iters=1500, verbose=True)
        pred_train = soft.predict(X_train)
        train_acc = np.mean(pred_train == y_train)
        pred_val = soft.predict(X_val)
        val_acc = np.mean(pred_val == y_val)
        results[(lr, reg)] = (train_acc, val_acc)
        if val_acc > best_val:
            best_softmax = soft
            best_val = val_acc

################################################################################
#                              END OF YOUR CODE                                #
################################################################################

# Print out results.
Example #26
# train the model
X_train = np.reshape(X_train, (X_train.shape[0], -1))  # flatten each image to 1-D
X_train = np.hstack([X_train, np.ones([X_train.shape[0], 1])])
X_test = np.reshape(X_test, (X_test.shape[0], -1))  # flatten each image to 1-D
X_test = np.hstack([X_test, np.ones([X_test.shape[0], 1])])
num_class = 10
W = np.random.randn(X_train.shape[1], num_class) * 0.001
# check the analytic gradient against the numerical gradient
from cs231n.classifiers import softmax_loss_naive, softmax_loss_vectorized
loss, grad = softmax_loss_naive(W, X_train, y_train, 0.5)
from cs231n.gradient_check import grad_check_sparse
f = lambda w: softmax_loss_vectorized(w, X_train, y_train, 0.5)[0]
grad_check_sparse(f, W, grad)
from cs231n.classifiers import Softmax
classifier = Softmax()
loss_hist = classifier.train(X_train,
                            y_train,
                            verbose=True,
                            num_iters=5000,
                            batch_size=100)
plt.plot(loss_hist)
plt.xlabel('Step')
plt.ylabel('Loss')
plt.show()
# generalization accuracy on the test set
y_pred = classifier.predict(X_test)
accuracy = np.mean(y_pred == y_test)
print("Test accuracy:", accuracy)

# from cs231n.classifiers import KNearestNeighbor, KNN_test, KNN_train
X2_test_feats = np.dot(X_test_feats,
                       best_net.params['W1']) + best_net.params['b1']

learning_rates = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0]
regularization_strengths = [0, 1e1]

results = {}
best_val = -1
best_softmax = None

################################################################################
# Softmax
################################################################################
for _l in learning_rates:
    for _r in regularization_strengths:
        softmax = Softmax()
        loss_hist = softmax.train(X2_train_feats,
                                  y_train,
                                  learning_rate=_l,
                                  reg=_r,
                                  num_iters=1500,
                                  verbose=False)

        y_train_pred = softmax.predict(X2_train_feats)
        train_accuracy = np.mean(y_train == y_train_pred)

        y_val_pred = softmax.predict(X2_val_feats)
        val_accuracy = np.mean(y_val == y_val_pred)
        print("train accuracy:{0}, val accuracy : {1}".format(
            train_accuracy, val_accuracy))
from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [5e4, 1e8]

################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
for alpha in learning_rates:
    for lam in regularization_strengths:
        softmax = Softmax()
        loss_hist = softmax.train(X_train,
                                  y_train,
                                  learning_rate=alpha,
                                  reg=lam,
                                  num_iters=500,
                                  verbose=True)
        y_tr_pred = softmax.predict(X_train)
        training_accuracy = float(np.mean(y_tr_pred == y_train))
        y_cv_pred = softmax.predict(X_val)
        cv_accuracy = float(np.mean(y_cv_pred == y_val))

        results[(alpha, lam)] = (training_accuracy, cv_accuracy)
        # getting many NaNs here... probably because of the learning rate /
        # regularization scale; the code itself seems fine (it runs and the
        # reported accuracies look reasonable)
        if cv_accuracy > best_val:
            best_val = cv_accuracy
            best_softmax = softmax