Example #1
    def __call__(self, model: NNLayer, tx, y, **kwargs):
        """
        Performs Stochastic Gradient Descent.
        :param tx: samples
        :param y: labels
        :param batch_size: size of the batches
        :param num_batches: number of batches to learn from per epoch
        :param loss: loss function
        :param lr: learning rate
        :param epochs: number of times to go over the dataset
        """
        batch_size = kwargs.get('batch_size', 1)
        num_batches = min(kwargs['num_batches'],
                          tx.shape[0]) if 'num_batches' in kwargs else 1
        loss = kwargs.get('loss', LogCosh())
        lr = kwargs.get('lr', .01)
        epochs = kwargs.get('epochs', 100)

        activation = model.get_activation_function()

        for epoch in range(epochs):
            running_loss = 0
            for batch_y, batch_tx in batch_iter(y, tx, batch_size,
                                                num_batches):
                # Forward pass: pre-activation output of the layer.
                txw = np.dot(batch_tx, model.get_params())
                # Chain rule: grad = tx^T . (dloss/dout * dact/dtxw).
                model.set_param(model.get_params() - lr * np.dot(
                    np.transpose(batch_tx, (1, 0)),
                    loss.gradient(activation(txw), batch_y) *
                    activation.gradient(txw)))
                running_loss += loss(activation(txw), batch_y)
            print(running_loss)
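
These SGD examples rely on a batch_iter helper that is not shown here; a minimal sketch, assuming it shuffles the data and yields (batch_y, batch_tx) pairs:

import numpy as np

def batch_iter(y, tx, batch_size, num_batches):
    """Yield up to num_batches shuffled (batch_y, batch_tx) minibatches."""
    indices = np.random.permutation(y.shape[0])
    for b in range(num_batches):
        batch = indices[b * batch_size:(b + 1) * batch_size]
        if batch.size == 0:
            break
        yield y[batch], tx[batch]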
Example #2
    def __call__(self, model: LinearModel, tx, y, **kwargs):
        """
        Performs Gradient Descent (full-batch: the whole dataset per step).
        :param tx: samples
        :param y: labels
        :param epochs: number of iterations over the dataset
        :param loss: loss function
        :param lr: learning rate
        """
        loss = kwargs.get('loss', LogCosh())
        lr = kwargs.get('lr', .01)
        epochs = kwargs.get('epochs', 100)

        for epoch in range(epochs):
            gradient = np.dot(np.transpose(tx, (1, 0)),
                              loss.gradient(model(tx), y))
            model.set_param(model.get_params() - lr * gradient)

            # Early stop once the L1 norm of the gradient is negligible.
            if np.sum(
                    np.abs(gradient)) < lr * 10**-2 / model.get_params().size:
                break
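
The optimizers default to a LogCosh loss whose definition is not included; a minimal sketch matching the __call__/gradient interface used above (log-cosh of the residual has the smooth gradient tanh(prediction - y)):

import numpy as np

class LogCosh:
    """Log-cosh loss: quadratic near zero, linear in the tails."""

    def __call__(self, prediction, y):
        # Mean log-cosh of the residuals over the batch.
        return np.mean(np.log(np.cosh(prediction - y)))

    def gradient(self, prediction, y):
        # d/dp log(cosh(p - y)) = tanh(p - y), element-wise.
        return np.tanh(prediction - y)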
Example #3
    def __call__(self, model: LinearModel, tx, y, **kwargs):
        """
        Performs Stochastic Gradient Descent with step decay of the
        learning rate.
        :param tx: samples
        :param y: labels
        :param batch_size: size of the batches
        :param num_batches: number of batches to learn from per epoch
        :param loss: loss function
        :param lr: learning rate
        :param epochs: number of times to go over the dataset
        :param epoch_step: (interval, factor) pair; lr is multiplied by
            factor every interval epochs
        """
        batch_size = kwargs.get('batch_size', 1)
        num_batches = min(kwargs['num_batches'],
                          tx.shape[0]) if 'num_batches' in kwargs else 1000
        loss = kwargs.get('loss', LogCosh())
        lr = kwargs.get('lr', .01)
        epochs = kwargs.get('epochs', 100)
        epoch_step = kwargs.get('epoch_step', (50, 0.75))

        for step in range(int(epochs / epoch_step[0])):
            for epoch in range(epoch_step[0]):
                running_loss = 0
                acc_grad = 0

                for batch_y, batch_tx in batch_iter(y, tx, batch_size,
                                                    num_batches):
                    # Reuse the forward pass for both the loss and the gradient.
                    out = model(batch_tx)
                    running_loss += loss(out, batch_y)
                    grad = np.dot(np.transpose(batch_tx, (1, 0)),
                                  loss.gradient(out, batch_y))
                    model.set_param(model.get_params() - lr * grad)
                    acc_grad += np.sum(np.abs(grad))

                # Early stop once the accumulated gradient is negligible.
                if acc_grad < lr * 10**-2 / model.get_params().size:
                    return
            lr *= epoch_step[1]
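
A hedged usage sketch for this step-decay variant, assuming it is LinearSGD.__call__ (as the kwargs in Example #5 suggest) and that tx and y are already loaded:

model = LinearModel((tx.shape[1], 1))
optimizer = LinearSGD()
# Decay the learning rate by a factor of 0.75 every 100 epochs.
optimizer(model, tx, y, batch_size=25, loss=LogCosh(), lr=10**-1,
          epochs=1000, epoch_step=(100, .75))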
Example #4
    def __call__(self, model: NNLayer, tx, y, **kwargs):
        """
        Performs Gradient Descent.
        :param tx: samples
        :param y: labels
        :param epochs: number of times to go through the dataset
        :param loss: loss function
        :param lr: learning rate
        """
        loss = kwargs.get('loss', LogCosh())
        lr = kwargs.get('lr', .01)
        epochs = kwargs.get('epochs', 1000)

        activation = model.get_activation_function()

        for epoch in range(epochs):
            txw = np.dot(tx, model.get_params())
            # Compute the gradient once and reuse it in the update.
            grad = np.dot(
                np.transpose(tx, (1, 0)),
                loss.gradient(activation(txw), y) * activation.gradient(txw))
            model.set_param(model.get_params() - lr * grad)
            print(loss(activation(txw), y))
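
Examples #1 and #4 assume an NNLayer exposing get_params, set_param and get_activation_function, with an activation object that can also report its own derivative; a minimal sketch of that interface (the sigmoid choice and the initialization are assumptions, not the repository's actual code):

import numpy as np

class Sigmoid:
    def __call__(self, z):
        return 1.0 / (1.0 + np.exp(-z))

    def gradient(self, z):
        s = self(z)
        return s * (1.0 - s)

class NNLayer:
    def __init__(self, shape):
        self.w = np.random.randn(*shape) * 0.01  # small random init
        self.activation = Sigmoid()

    def get_params(self):
        return self.w

    def set_param(self, w):
        self.w = w

    def get_activation_function(self):
        return self.activation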
Example #5
        spine.set_visible(False)

    ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)
    ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)

    return im, cbar


if __name__ == "__main__":
    # Resolve the project root two directories above this file.
    path = os.path.split(
        os.path.split(os.path.dirname(os.path.abspath(__file__)))[0])[0]
    data = np.load(os.path.join(path, 'resources', 'train.npy'))

    loss = LogCosh()
    model = LinearModel((3, 1))
    kwargs = {
        'batch_size': 25,
        'loss': loss,
        'lr': 10**-1,
        'epochs': 1000,
        'epoch_step': (100, .75)
    }
    optimizer = LinearSGD()
    n_models = 1

    # Sweep pairs of feature columns, skipping columns 8, 19 and 28.
    for i in range(32, data.shape[1]):
        for j in range(i + 1, data.shape[1]):
            if i in [8, 19, 28] or j in [8, 19, 28]:
                continue
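
All of the linear examples depend on a LinearModel; a minimal sketch matching the calls made above (model(tx) as the forward pass plus the get_params/set_param pair; the zero initialization is an assumption):

import numpy as np

class LinearModel:
    def __init__(self, shape):
        # shape such as (n_features + 1, 1), as in Example #6.
        self.w = np.zeros(shape)

    def __call__(self, tx):
        return np.dot(tx, self.w)

    def get_params(self):
        return self.w

    def set_param(self, w):
        self.w = w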
Example #6
    n_features = np.sum(mask)
    # One model per optimizer below, paired by index.
    models = [LinearModel((n_features + 1, 1)),
              LinearModel((n_features + 1, 1)),
              Logistic(n_features + 1),
              LinearModel((n_features + 1, 1)),
              LinearModel((n_features + 1, 1))]

    optimizers = [Ridge(),
                  LS(),
                  LinearSGD(),
                  LinearGD(),
                  LogisticSGD()]

    optimizer_kwargs = [[{'lambda_': 0.5}],
                        [{}],
                        [{'batch_size': 25, 'loss': LogCosh(), 'lr': 10**-1,
                          'epochs': 1000, 'regularize': r}
                         for r in range(2)],
                        [{'batch_size': 25, 'loss': LogCosh(), 'lr': 10**-1,
                          'epochs': 1000}],
                        []]

    normalizers = [
                   MinMaxNormalizer()
                   # , GaussianNormalizer()
                   # , DecimalScaling()
                  ]

    lrf = LinearRegressionFilling(data[:, mask], epochs=100)
    lrf.load(os.path.join(path, 'src', 'preconditioning',
                          'regression_filler_params.npy'))

    filling_data = [
                    MeanFilling(data[:, mask]),