Example #1
import numpy as np

def pick_hyperparams(X_train, y_train, X_val, y_val, learning_rates, regularization_strengths, iterations=4000, batches=400):
    results = {}
    best_val = -1
    best_softmax = None

    ################################################################################
    # TODO:                                                                        #
    # Use the validation set to set the learning rate and regularization strength. #
    # Save the best trained softmax classifer in best_softmax.                     #
    # Hint: about 10 lines of code expected                                       #
    ################################################################################
    for lr in learning_rates:
        for reg in regularization_strengths:
            print(lr, reg)
            softmax = SoftmaxClassifier()
            softmax.train(X_train, y_train, lr, reg, iterations, batches)
            pred_val = softmax.predict(X_val)
            val_accuracy = np.mean(pred_val == y_val)
            pred_train = softmax.predict(X_train)
            train_accuracy = np.mean(pred_train == y_train)  # bug fix: compare against y_train, not y_val
            results[(lr, reg)] = (train_accuracy, val_accuracy)
            if val_accuracy > best_val:
                best_val = val_accuracy
                best_softmax = softmax

    return best_softmax, results, best_val
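
A minimal usage sketch for the helper above; the candidate grids and the pre-split arrays (X_train, y_train, X_val, y_val) are illustrative assumptions, not part of the original snippet:

# Hypothetical grids; the original leaves these to the caller.
learning_rates = [1e-7, 5e-7, 1e-6]
regularization_strengths = [1e4, 5e4, 1e5]

best_softmax, results, best_val = pick_hyperparams(
    X_train, y_train, X_val, y_val,
    learning_rates, regularization_strengths,
    iterations=2000, batches=400)
print('best validation accuracy: %f' % best_val)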
Example #2
# TODO: Use the validation set to tune hyperparameters for softmax classifier
# choose learning rate and regularization strength (use the code from softmax_hw.py)


results = {}
best_val = -1
best_softmax = None
import numpy as np

learning_rates = [1e-4, 1e-3, 1e-2, 1e-1]
regularization_strengths = [0.01, 0.05, 0.1, 0.5, 1]
for lr in learning_rates:
    for rs in regularization_strengths:
        print("calculating: lr=%e, reg=%e" % (lr, rs))
        ns = SoftmaxClassifier()
        ns.train(X_train, y_train, lr, rs, batch_size=400, num_iters=2000)
        ta = np.mean(y_train == ns.predict(X_train))  # training accuracy
        va = np.mean(y_val == ns.predict(X_val))      # validation accuracy
        results[(lr, rs)] = (ta, va)
        if va > best_val:
            best_val = va
            best_softmax = ns



# TODO: Evaluate best softmax classifier on set aside test set (use the code from softmax_hw.py)
# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
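
The second TODO asks for an evaluation on the set-aside test set, which this snippet never performs; a minimal sketch, assuming X_test and y_test exist alongside the other splits:

test_accuracy = np.mean(y_test == best_softmax.predict(X_test))
print('softmax test set accuracy: %f' % test_accuracy)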
    
Example #3
import numpy as np
from data import make_spiral_data, plot_2d_data
from softmax import SoftmaxClassifier


if __name__ == '__main__':

    # generate spiral data
    n_classes = 3
    data, y = make_spiral_data(100, n_classes, 2)
    fig = plot_2d_data(data, y)
    fig.show()

    # train model
    model = SoftmaxClassifier(n_classes)
    model.fit(data, y)
    print("Training accuracy is {:0.2f}".format(model.training_accuracy))
    print("Training loss is {:0.3f}".format(model.training_loss))
    boundaries = model.plot_boundaries(data, y)
    boundaries.show()
    loss_vs_epoch = model.plot_training_loss()
    loss_vs_epoch.show()

    # test model against 'new' data
    new_data, new_y = make_spiral_data(50, n_classes, 2)
    predictions = model.predict(new_data)
    print("Test accuracy is {:0.2f}".format(model.accuracy(new_data, new_y)))

Example #4
# choose learning rate and regularization strength (use the code from softmax_hw.py)
import numpy as np

batch_sizes = [200, 300, 400]
iterations = [1000, 2000, 3000]
learning_rates = [5e-7, 1e-6, 5e-6]
regularization_strengths = [1e2, 1e3, 1e4, 1e5]
best_val = -1
best_softmax = None

for batch_size in batch_sizes:
    for num_iters in iterations:
        for learning_rate in learning_rates:
            for reg in regularization_strengths:
                softmax = SoftmaxClassifier()
                softmax.train(X_train, y_train, learning_rate, reg=reg,
                              num_iters=num_iters, batch_size=batch_size, verbose=True)
                y_pred_val = softmax.predict(X_val)
                current_val = np.mean(y_pred_val == y_val)
                if current_val > best_val:
                    best_val = current_val
                    best_softmax = softmax
                    best_learning_rate = learning_rate
                    best_reg = reg
                    best_iteration = num_iters
                    best_batch_size = batch_size

print "best batch size is ", best_batch_size
print "best iteration is ", best_iteration
print "best reg is ", best_reg
print "best learning rate is ", best_learning_rate

Example #5
import itertools

import numpy as np

# regularization_strengths = [5e4, 1e5, 5e5, 1e8]
learning_rates = [5e-6]
regularization_strengths = [1e5]

results = {}  # (lr, rs) -> (train_accuracy, val_accuracy); never initialized in the original
cl = SoftmaxClassifier()
loss_hist = []
for lr, rs in itertools.product(learning_rates, regularization_strengths):
    _ = cl.train(X_train,
                 y_train,
                 lr,
                 rs,
                 num_iters=4000,
                 batch_size=400,
                 verbose=True)
    loss, _ = cl.loss(X_val, y_val, rs)
    pred_t = cl.predict(X_train)
    pred_v = cl.predict(X_val)
    train_accuracy = np.mean(pred_t == y_train)
    val_accuracy = np.mean(pred_v == y_val)
    results[(lr, rs)] = (train_accuracy, val_accuracy)
    loss_hist.append(loss)
ind = int(np.argmin(loss_hist))  # settings with the lowest validation loss
parameters = list(itertools.product(learning_rates,
                                    regularization_strengths))[ind]
best_softmax = SoftmaxClassifier()
_ = best_softmax.train(X_train, y_train, parameters[0], parameters[1],
                       num_iters=4000, batch_size=400, verbose=True)
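
Unlike the other examples, this one selects hyperparameters by minimum validation loss rather than maximum validation accuracy. A quick follow-up check on the retrained model, assuming the same splits as above (the original snippet stops right after retraining):

best_val_accuracy = np.mean(best_softmax.predict(X_val) == y_val)
print('retrained softmax validation accuracy: %f' % best_val_accuracy)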
Example #6
# TODO: Split into train, validation and test sets
import numpy as np
from sklearn import cross_validation  # sklearn < 0.20; newer releases use sklearn.model_selection

X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.2)
X_train, X_val, y_train, y_val = cross_validation.train_test_split(X_train, y_train, test_size=0.1)

# TODO: Use the validation set to tune hyperparameters for softmax classifier
# choose learning rate and regularization strength (use the code from softmax_hw.py)

results = {}
best_val = -1
best_params = None
best_softmax = SoftmaxClassifier()
learning_rates = [0.005]
regularization_strengths = [1]

classifier = SoftmaxClassifier()
accuracy = lambda x, y: np.mean(classifier.predict(x) == y)
for lr in learning_rates:
    for reg in regularization_strengths:
        classifier.train(X_train, y_train, learning_rate=lr, reg=reg,
                         num_iters=10000, batch_size=y_train.size // 4)  # integer division for a valid batch size
        val_accuracy = accuracy(X_val, y_val)
        train_accuracy = accuracy(X_train, y_train)
        results[(lr, reg)] = (train_accuracy, val_accuracy)

        print('lr %e reg %e train accuracy: %f val accuracy: %f' % (lr, reg, train_accuracy, val_accuracy))

        if val_accuracy > best_val:
            best_softmax.theta = classifier.theta
            best_val = val_accuracy
            best_params = (lr, reg)
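
The split above sets aside X_test and y_test, but the snippet never uses them; a minimal sketch of the held-out evaluation, assuming best_softmax keeps the predict interface used here:

test_accuracy = np.mean(best_softmax.predict(X_test) == y_test)
print('final softmax test accuracy: %f (params: %s)' % (test_accuracy, best_params))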

Example #7
#best_params, best_classifier = utils.getBestRegOVA(classify, X_train, y_train, X_val, y_val, regularization_strengths, pen='l1')

"""
#best LR = [15]
learning_rates = [15] # [11,12,13,14,15,16,17,18,19,20]
#Best RS = [1e-3]
regularization_strengths = [2e-3] #[1e-3,2e-3,3e-3,4e-3,5e-3,6e-3,7e-3]
classify = SoftmaxClassifier()
# takes classifier, X_train, y_train, X_val, y_val, learning_rates, regularization_strengths and optionally print_train and print_val
best_params, best_classifier = utils.getBestRegAndLearnSoftMax(classify, X_train, y_train, X_val, y_val, learning_rates, regularization_strengths)
# end
"""
print "\nTraining the classifier..."
best_classifier = SoftmaxClassifier()
best_classifier.train(X_train, y_train, reg=1e1, learning_rate=15)
print np.mean(best_classifier.predict(X_val) == y_val)
print theta.shape
hogimg = best_classifier.theta[1025:1537,:].reshape(8,8,8,10)

utils.visualizeHOGTheta(hogimg)

print "\nMaking the final prediction..."
sys.stdout.flush()
labels = []
ids = []
batch = 50000
for j in range(3, 6):
	print "\nPart ", j + 1, " of 3"
	sys.stdout.flush()
	
	name = "batch_" + str(j) + ".npz"
print 'Gradient difference: %f' % grad_difference

results = {}
best_val = -1
best_softmax = None
#learning_rates = [1e-7, 5e-7, 1e-6, 5e-6]
#regularization_strengths = [ 5e4, 1e5, 5e5, 1e8]
learning_rates = [ 5e-6]
regularization_strengths = [1e5]

cl = SoftmaxClassifier()
loss_hist = []
for lr, rs in itertools.product(learning_rates, regularization_strengths):
    _ = cl.train(X_train, y_train, lr, rs, num_iters=4000, batch_size=400, verbose=True)
    loss, _ = cl.loss(X_val, y_val, rs)
    train_accuracy = np.mean(cl.predict(X_train) == y_train)
    val_accuracy = np.mean(cl.predict(X_val) == y_val)
    results[(lr, rs)] = (train_accuracy, val_accuracy)
    loss_hist.append(loss)

ind = int(np.argmin(loss_hist))  # settings with the lowest validation loss
parameters = list(itertools.product(learning_rates, regularization_strengths))[ind]
best_softmax = SoftmaxClassifier()
_ = best_softmax.train(X_train, y_train, parameters[0], parameters[1],
                       num_iters=4000, batch_size=400, verbose=True)
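
Every example above imports a SoftmaxClassifier with train/predict (and sometimes loss) but never shows it. Below is a minimal sketch of such a class under the common CS231n-style interface, offered as an assumption about what the imported class looks like rather than any of these projects' actual code:

import numpy as np

class SoftmaxClassifier(object):
    """Minimal linear softmax classifier trained with minibatch SGD (sketch)."""

    def __init__(self, n_classes=None):
        self.n_classes = n_classes
        self.theta = None  # (n_features, n_classes) weight matrix

    def loss(self, X, y, reg):
        # Average cross-entropy plus L2 penalty; returns (loss, gradient).
        scores = X.dot(self.theta)
        scores -= scores.max(axis=1, keepdims=True)  # numeric stability
        probs = np.exp(scores)
        probs /= probs.sum(axis=1, keepdims=True)
        n = X.shape[0]
        data_loss = -np.log(probs[np.arange(n), y]).mean()
        loss = data_loss + 0.5 * reg * np.sum(self.theta ** 2)
        dscores = probs.copy()
        dscores[np.arange(n), y] -= 1.0
        grad = X.T.dot(dscores) / n + reg * self.theta
        return loss, grad

    def train(self, X, y, learning_rate=1e-6, reg=1e4,
              num_iters=2000, batch_size=200, verbose=False):
        n_classes = self.n_classes if self.n_classes else int(y.max()) + 1
        if self.theta is None:
            self.theta = 0.001 * np.random.randn(X.shape[1], n_classes)
        loss_history = []
        for it in range(num_iters):
            idx = np.random.choice(X.shape[0], batch_size)  # sample a minibatch
            loss, grad = self.loss(X[idx], y[idx], reg)
            loss_history.append(loss)
            self.theta -= learning_rate * grad  # vanilla SGD step
            if verbose and it % 500 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))
        return loss_history

    def predict(self, X):
        # Class with the highest linear score wins.
        return X.dot(self.theta).argmax(axis=1)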