Example #1
    def __call__(self, model: LinearModel, tx, y, **kwargs):
        """
        Performs Stochastic Gradient Descent.
        :param tx: sample
        :param y: labels
        :param max_iter: number of batches to learn
        :param loss: loss function
        :param lr: learning rate
        """
        loss = kwargs.get('loss', LogCosh())
        lr = kwargs.get('lr', 0.01)
        epochs = kwargs.get('epochs', 100)

        for epoch in range(epochs):
            # Full-batch gradient of the loss w.r.t. the model parameters
            gradient = np.dot(tx.T, loss.gradient(model(tx), y))
            model.set_param(model.get_params() - lr * gradient)

            # Stop early once the accumulated gradient becomes negligible
            if np.sum(np.abs(gradient)) < lr * 1e-2 / model.get_params().size:
                break
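Example #1 only assumes that LinearModel exposes __call__, get_params and set_param, and that the loss object exposes __call__ and gradient. Neither class is shown in these snippets; a minimal sketch consistent with how they are used (the real implementations may differ) could look like:

import numpy as np

class LinearModel:
    """Linear map y_hat = tx @ W. Hypothetical reconstruction of the
    interface the examples rely on: built from a (n_inputs, n_outputs)
    shape, callable on a sample matrix, with get/set parameter accessors."""

    def __init__(self, shape):
        self.w = np.zeros(shape)

    def __call__(self, tx):
        return tx @ self.w

    def get_params(self):
        return self.w

    def set_param(self, w):
        self.w = w

class LogCosh:
    """Log-cosh loss: roughly quadratic near zero, linear for large errors."""

    def __call__(self, prediction, target):
        return np.mean(np.log(np.cosh(prediction - target)))

    def gradient(self, prediction, target):
        # d/dp log(cosh(p - t)) = tanh(p - t)
        return np.tanh(prediction - target)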
Example #2
def least_squares(y, tx):
    """Fits a linear model by least squares and returns (weights, test MSE)."""
    np.random.seed(0)
    # Stack labels and features so that rows stay aligned through the split
    data = np.hstack([np.reshape(y, (-1, 1)), tx])

    model = LinearModel((tx.shape[1], y.shape[1]))
    optimizer = LS()

    training, test = split(data)
    # The first y.shape[1] columns hold the labels, the rest the features
    optimizer(model, training[:, y.shape[1]:], training[:, :y.shape[1]])
    error = MSE()(model(test[:, y.shape[1]:]), test[:, :y.shape[1]])

    return model.get_params(), error
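The split and MSE helpers used here (and in the later examples) are not shown either. A plausible sketch, assuming split performs a random row-wise train/test partition (the 80/20 ratio is an assumption) and MSE is a mean-squared-error callable:

import numpy as np

def split(data, ratio=0.8):
    """Randomly partition the rows of `data` into training and test sets."""
    idx = np.random.permutation(data.shape[0])
    cut = int(ratio * data.shape[0])
    return data[idx[:cut]], data[idx[cut:]]

class MSE:
    """Mean squared error between predictions and targets."""

    def __call__(self, prediction, target):
        return np.mean((prediction - target) ** 2)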
Example #3
    def __call__(self, model: LinearModel, tx, y, **kwargs):
        """
        Performs Stochastic Gradient Descent.
        :param tx: sample
        :param y: labels
        :param batch_size: size of the batches
        :param num_batches: number of batches to learn
        :param loss: loss function
        :param lr: learning rate
        :param epoch: number of times to go over the dataset
        """
        batch_size = kwargs.get('batch_size', 1)
        num_batches = (min(kwargs['num_batches'], tx.shape[0])
                       if 'num_batches' in kwargs else 1000)
        loss = kwargs.get('loss', LogCosh())
        lr = kwargs.get('lr', 0.01)
        epochs = kwargs.get('epochs', 100)
        epoch_step = kwargs.get('epoch_step', (50, 0.75))

        for step in range(int(epochs / epoch_step[0])):
            for epoch_iter in range(epoch_step[0]):
                running_loss = 0
                acc_grad = 0

                for batch_y, batch_tx in batch_iter(y, tx, batch_size,
                                                    num_batches):
                    # Forward pass and running-loss bookkeeping
                    out = model(batch_tx)
                    running_loss += loss(out, batch_y)
                    # Mini-batch gradient of the loss w.r.t. the parameters
                    grad = np.dot(batch_tx.T, loss.gradient(out, batch_y))
                    model.set_param(model.get_params() - lr * grad)
                    acc_grad += np.sum(np.abs(grad))

                # Stop early once the accumulated gradient becomes negligible
                if acc_grad < lr * 1e-2 / model.get_params().size:
                    return

            # Decay the learning rate every epoch_step[0] epochs
            lr *= epoch_step[1]
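batch_iter is not defined in these snippets; a plausible sketch, assuming it yields num_batches random mini-batches of (batch_y, batch_tx) pairs matching the call above, is:

import numpy as np

def batch_iter(y, tx, batch_size, num_batches):
    """Yield `num_batches` random (batch_y, batch_tx) mini-batches.
    Hypothetical helper, consistent with how the SGD loop above calls it."""
    n = tx.shape[0]
    for _ in range(num_batches):
        idx = np.random.choice(n, size=min(batch_size, n), replace=False)
        yield y[idx], tx[idx]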
Example #4
def ridge_regression(y, tx, lambda_):
    """Fits a linear model with L2 regularization strength lambda_
    and returns (weights, test MSE)."""
    np.random.seed(0)
    data = np.hstack([np.reshape(y, (-1, 1)), tx])

    model = LinearModel((tx.shape[1], y.shape[1]))
    optimizer = Ridge()

    training, test = split(data)

    optimizer(model,
              training[:, y.shape[1]:],
              training[:, :y.shape[1]],
              lambda_=lambda_)
    error = MSE()(model(test[:, y.shape[1]:]), test[:, :y.shape[1]])

    return model.get_params(), error
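The Ridge optimizer implementation is not shown. For reference, ridge regression is typically solved in closed form; the sketch below is one common convention and not necessarily the code behind Ridge():

import numpy as np

def ridge_solution(tx, y, lambda_):
    """Closed-form ridge weights: w = (X^T X + 2*N*lambda*I)^{-1} X^T y.
    The 2*N scaling of lambda is one common convention; the scaling used
    by Ridge() here is an assumption."""
    n, d = tx.shape
    a = tx.T @ tx + 2 * n * lambda_ * np.eye(d)
    return np.linalg.solve(a, tx.T @ y)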
Example #5
def logistic_regression(y, tx, initial_w, max_iters, gamma):
    """Trains a linear model for max_iters iterations with step size gamma,
    starting from initial_w, and returns (weights, test MSE)."""
    np.random.seed(0)
    data = np.hstack([np.reshape(y, (-1, 1)), tx])

    model = LinearModel((tx.shape[1], y.shape[1]))
    optimizer = LS()

    training, test = split(data)
    model.set_param(initial_w)
    optimizer(model,
              training[:, y.shape[1]:],
              training[:, :y.shape[1]],
              epochs=max_iters,
              epoch_step=(max_iters, 1),
              num_batches=1,
              batch_size=1,
              lr=gamma,
              regularize=0)
    error = MSE()(model(test[:, y.shape[1]:]), test[:, :y.shape[1]])

    return model.get_params(), error
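A hypothetical call, using synthetic data purely for illustration and shaped to match the signature above (labels as an (N, 1) array, features as an (N, D) matrix):

import numpy as np

# Synthetic data for illustration only
N, D = 200, 3
tx = np.random.randn(N, D)
y = (tx @ np.array([[1.0], [-2.0], [0.5]]) > 0).astype(float)

initial_w = np.zeros((D, 1))
weights, test_error = logistic_regression(y, tx, initial_w,
                                          max_iters=100, gamma=0.01)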
Example #6
            for it in range(n_models):
                # Random restart of the model parameters
                model.set_param(np.random.uniform(-1, 1, (3, 1)))
                # Keep rows where neither feature i nor j is missing (-999),
                # normalize, then split into train/test sets
                train, test = split(MinMaxNormalizer()(data[np.logical_and(
                    data[:, i] != -999, data[:, j] != -999)]))

                # Training inputs for the pair (i, j), with a bias column
                x = np.hstack([train[:, [i, j]], np.ones((train.shape[0], 1))])
                y = np.reshape(train[:, 1], (-1, 1))
                # Test inputs for the same pair, with a bias column
                test_x = np.hstack([test[:, [i, j]],
                                    np.ones((test.shape[0], 1))])

                optimizer(model, x, y, **kwargs)
                error = loss(model(test_x), np.reshape(test[:, 2], (-1, 1)))
                if error < min_error:
                    min_error = error
                    best = model.get_params()

            # Persist the best parameters found for this feature pair
            np.save(arr=best,
                    file='./saved/' + str(i) + 'and' + str(j) + '_' +
                    "%0.2f" % min_error + '.npy')

    onlyfiles = [
        f for f in os.listdir('./saved/')
        if os.path.isfile(os.path.join('./saved/', f))
    ]
    n_features = data.shape[1] - 2
    error_matrix = np.zeros((n_features, n_features))
    for f in onlyfiles:
        # Recover the feature pair and stored error from the file name
        split_str = f.split('and')
        first = int(split_str[0]) - 2
        split_str = split_str[1].split('_')