Example #1
def auto_get_parameters(X_train, y_train, X_val, y_val):
    learning_rates = [1e-7, 5e-5]
    regularization_strengths = [5e4, 1e5]

    best_val = -1
    best_parameters = None

    for lr in learning_rates:
        for reg in regularization_strengths:
            softmax = Softmax()
            # train with this (regularization, learning rate) pair; the remaining
            # positional arguments follow this project's Softmax.train signature
            softmax.train(X_train, y_train, reg, lr, 200, 1500, True)
            y_pred = softmax.predict(X_val)
            acc = np.mean(y_pred == y_val)
            if acc > best_val:
                best_val = acc
                best_parameters = (lr, reg)

    print('Parameters identified. Best validation accuracy achieved during cross-validation: %f' % best_val)
    return best_parameters
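A minimal usage sketch for the function above, assuming CIFAR-10-style splits X_train/y_train/X_val/y_val and the same Softmax class are already prepared elsewhere (both are assumptions, not part of the original example):

# Sketch only: X_train, y_train, X_val, y_val and Softmax are assumed to exist.
best_lr, best_reg = auto_get_parameters(X_train, y_train, X_val, y_val)
print('best learning rate: %e, best regularization strength: %e' % (best_lr, best_reg))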
Example #2
    for epoch in range(0, max_epoch):

        # number of minibatch iterations per epoch
        iters_per_epoch = train_set.num_examples // batch_size

        for batch_id in range(0, iters_per_epoch):
            # get the next minibatch (the data has already been shuffled)
            batch = train_set.next_batch(batch_size)
            X, label = batch
            label = normalize(label)

            # Compute loss and gradient
            loss, grad = classifier.vectorized_loss(X, label, reg)
            loss_history.append(loss)

            # Generate Predictions
            y_train_pred = classifier.predict(X)
            y_train_acc = np.mean(y_train_pred == label)
            acc_history.append(y_train_acc)

            # update weights
            classifier.update_weights(grad)

            # print("ITER: {}, LOSS: {}, ACC: {}".format(batch_id,loss_history[-1],acc_history[-1]))

        print("ITER: {}, LOSS: {}, ACC: {}".format(epoch, loss_history[-1],
                                                   acc_history[-1]))

    # Test Case
    print(">>> Computing the accuracy of the model on the test set.")

    y_test_label = normalize(test_set.labels)
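The normalize helper used above is not defined in this snippet; below is a minimal sketch of one plausible implementation, assuming the data loader yields one-hot label matrices while classifier.predict returns integer class indices (both are assumptions):

import numpy as np

def normalize(labels):
    # Assumed behaviour: map one-hot label rows to integer class indices so
    # they can be compared elementwise with the classifier's predictions.
    labels = np.asarray(labels)
    return np.argmax(labels, axis=1) if labels.ndim == 2 else labels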
Example #3
import numpy as np
from softmax import Softmax
from sklearn.datasets import load_iris

data = load_iris()
X = data.data
y = data.target
reg_strength = 1e-4
batch_size = 50
epochs = 1000
learning_rate = 5e-1
weight_update = 'sgd_with_momentum'
sm = Softmax(batch_size=batch_size,
             epochs=epochs,
             learning_rate=learning_rate,
             reg_strength=reg_strength,
             weight_update=weight_update)
sm.train(X, y)
pred = sm.predict(X)
print(np.mean(np.equal(y, pred)))
Example #4
################################################################################
#                                  TASK:                                       #
#            Train the best softmax using all of the training data             #
################################################################################
# results, best_val and best_softmax are assumed to be defined earlier in the
# original notebook; initialize them here so the snippet runs on its own.
results = {}
best_val = -1
best_softmax = None

learning_rates = [1.4e-7, 1.45e-7, 1.5e-7, 1.55e-7, 1.6e-7]
regularization_strengths = [2.3e4, 2.6e4, 2.7e4, 2.8e4, 2.9e4]
for l in learning_rates:
    for r in regularization_strengths:
        softmax = Softmax()
        loss_hist = softmax.train(X_train,
                                  y_train,
                                  learning_rate=l,
                                  reg=r,
                                  num_iters=2000,
                                  verbose=True)
        y_train_pred = softmax.predict(X_train)
        train_accuracy = np.mean(y_train == y_train_pred)
        y_val_pred = softmax.predict(X_val)
        val_accuracy = np.mean(y_val == y_val_pred)
        results[(l, r)] = (train_accuracy, val_accuracy)
        if best_val < val_accuracy:
            best_val = val_accuracy
            best_softmax = softmax
################################################################################
#                              END OF CODE                                     #
################################################################################

for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' %
          (lr, reg, train_accuracy, val_accuracy))
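A natural follow-up, not part of the original snippet, is to report the best validation accuracy and evaluate the selected model on a held-out test split; the names X_test and y_test below are assumptions about how that split was prepared:

print('best validation accuracy achieved during cross-validation: %f' % best_val)

# Sketch only: X_test / y_test are assumed to come from the same data-preparation step.
y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('softmax final test set accuracy: %f' % test_accuracy)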
Example #5
class Model:
    def __init__(self, layers_dim):
        self.layers = []
        self.layerdim = layers_dim
        self.SM = Softmax()
        self.MB = MiniBatch()

        for i in range(len(layers_dim) - 1):
            # For TanH activation, ensure the weights are positive
            #wx = np.random.randint(0, 100, size=(layers_dim[i],  layers_dim[i+1])) / 10000
            #bx = np.atleast_2d(np.array([np.random.randint(0, 100) / 1000 for i in range(layers_dim[i+1])]))

            # For leaky ReLU we want both positive and negative weights
            wx = np.random.randn(layers_dim[i], layers_dim[i + 1]) / np.sqrt(
                layers_dim[i])
            bx = np.atleast_2d(
                np.random.randn(layers_dim[i + 1]).reshape(
                    1, layers_dim[i + 1]))
            self.layers.append(Layer(wx, bx))

    def calculate_loss(self, X, y):
        output = X
        for layer in self.layers:
            output = layer.forward(output)
        return self.SM.loss(output, y)

    def predict(self, X):
        output = X
        for layer in self.layers:
            output = layer.forward(output)
        probs = self.SM.predict(output)
        return np.argmax(probs, axis=1)

    def train(self,
              X,
              y,
              num_passes=1000,
              epsilon=0.01,
              reg_lambda=0.01,
              print_loss=False):
        epochLoss = []
        t = 0
        beta1 = 0.9
        beta2 = 0.999
        for epoch in range(num_passes):
            # Forward propagation
            minibatches = self.MB.mini_batches(X, y, 50)

            batchLoss = []
            for minibatch in minibatches:

                minibatch_X, minibatch_Y = minibatch

                # forward pass through all layers
                output = minibatch_X
                for layer in self.layers:
                    output = layer.forward(output)

                # softmax loss and its gradient w.r.t. the network output
                delta = self.SM.diff(output, minibatch_Y)
                loss = self.SM.loss(output, minibatch_Y)
                batchLoss.append(loss)

                # backward pass: propagate the gradient and update each layer
                for i, layer in enumerate(reversed(self.layers)):
                    t += 1
                    ix = len(self.layers) - i
                    delta = layer.backward(ix, delta, epsilon, reg_lambda,
                                           beta1, beta2, t)

            epochLoss.append(np.mean(batchLoss))

            if print_loss and epoch % 50 == 0:
                print("Loss after iteration %i: %f" %
                      (epoch, self.calculate_loss(X, y)))

        return epochLoss
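A minimal usage sketch for the Model class above, assuming Layer, MiniBatch, and Softmax are the companion classes from the same project; the iris data and the hidden-layer size of 16 are only illustrative choices, not part of the original example:

# Sketch only: Layer, MiniBatch and Softmax are assumed importable from the same project.
import numpy as np
from sklearn.datasets import load_iris

iris = load_iris()
X, y = iris.data, iris.target

model = Model([X.shape[1], 16, 3])   # input features -> hidden units -> classes
loss_history = model.train(X, y, num_passes=500, print_loss=True)
print('training accuracy:', np.mean(model.predict(X) == y))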