Exemplo n.º 1
0
def run_batches(shift_data=0,
                n_batches=20,
                batch_size=20,
                lr=1.e-1,
                iterations=200,
                zero_grad=True,
                verbose=False,
                plot=True,
                seed=1):
    """Train a two-weight linear model with mini-batch gradient descent.

    Generates ``n_batches * batch_size`` samples, then repeatedly updates
    the weights on one batch per iteration, cycling through the batches
    in order.

    Parameters
    ----------
    shift_data : offset forwarded to ``generate_data``.
    n_batches, batch_size : number of mini-batches and samples per batch.
    lr : learning rate forwarded to ``update_batch``.
    iterations : total number of update steps.
    zero_grad : whether ``update_batch`` zeroes gradients after each step.
    verbose : print the running MSE roughly 10 times over the run.
    plot : show a ``plot_summary`` figure at the end.
    seed : RNG seed forwarded to ``generate_data``.

    Returns
    -------
    np.ndarray
        Per-iteration MSE losses.
    """
    m = n_batches * batch_size
    x, y, w_true = generate_data(size=m,
                                 shift_data=shift_data,
                                 plot=False,
                                 seed=seed)

    w_guess = nn.Parameter(tensor(-1., 1))

    losses, weights_1, weights_2 = [], [], []

    # Report ~10 times over the run; max(1, ...) avoids ZeroDivisionError
    # when iterations < 10 (the original divided by iterations // 10 directly).
    report_every = max(1, iterations // 10)

    for t in range(iterations):
        # -- batch limits: cycle through the batches in order --
        batch_idx = t % n_batches
        start = batch_idx * batch_size
        end = start + batch_size
        # ------------------------------------------------------
        x_b = x[start:end].clone()  # clone so update_batch cannot mutate x
        y_b = y[start:end]  # y is not modified downstream; no clone needed

        loss, w_guess = update_batch(x_b,
                                     y_b,
                                     w_guess,
                                     lr=lr,
                                     zero_grad=zero_grad)
        losses.append(float(loss.detach().numpy()))
        weights_1.append(w_guess.detach().numpy()[0])
        weights_2.append(w_guess.detach().numpy()[1])

        # Logical `and` (was bitwise `&`): short-circuits, and checking
        # verbose first skips the modulo entirely in the common quiet case.
        if verbose and t % report_every == 0:
            print(f'MSE {losses[-1]}')

    if plot:
        plot_summary(x,
                     y,
                     x @ w_guess.detach().numpy(),
                     losses,
                     weights_1,
                     weights_2,
                     shift_data=shift_data,
                     title='mini batches')

    return np.array(losses)
Exemplo n.º 2
0
def run_batch(shift_data=0,
              size=400,
              lr=1.e-1,
              iterations=200,
              zero_grad=True,
              verbose=False,
              plot=True,
              seed=1):
    """Train a two-weight linear model with full-batch gradient descent.

    Every iteration updates the weights on the entire data set (contrast
    with ``run_batches``, which cycles through mini-batches).

    Parameters
    ----------
    shift_data : offset forwarded to ``generate_data``.
    size : number of generated samples.
    lr : learning rate forwarded to ``update_batch``.
    iterations : total number of update steps.
    zero_grad : whether ``update_batch`` zeroes gradients after each step.
    verbose : print the running MSE roughly 10 times over the run.
    plot : show a ``plot_summary`` figure at the end.
    seed : RNG seed forwarded to ``generate_data``.

    Returns
    -------
    np.ndarray
        Per-iteration MSE losses.
    """
    x, y, w_true = generate_data(size=size,
                                 shift_data=shift_data,
                                 plot=False,
                                 seed=seed)

    w_guess = nn.Parameter(tensor(-1., 1))

    losses, weights_1, weights_2 = [], [], []

    # Report ~10 times over the run; max(1, ...) avoids ZeroDivisionError
    # when iterations < 10 (the original divided by iterations // 10 directly).
    report_every = max(1, iterations // 10)

    for t in range(iterations):
        loss, w_guess = update_batch(x, y, w_guess, lr=lr, zero_grad=zero_grad)
        losses.append(float(loss.detach().numpy()))
        weights_1.append(w_guess.detach().numpy()[0])
        weights_2.append(w_guess.detach().numpy()[1])

        # Logical `and` (was bitwise `&`): short-circuits, and checking
        # verbose first skips the modulo entirely in the common quiet case.
        if verbose and t % report_every == 0:
            print(f'MSE {losses[-1]}')

    if plot:
        plot_summary(x,
                     y,
                     x @ w_guess.detach().numpy(),
                     losses,
                     weights_1,
                     weights_2,
                     shift_data=shift_data,
                     title='one batch')

    return np.array(losses)
Exemplo n.º 3
0
        #print(df1)
    with torch.no_grad():
        vertLatents.sub_(lr * vertLatents.grad)
        vertLatents.grad.zero_()
        horiLatents.sub_(lr * horiLatents.grad)
        horiLatents.grad.zero_()
    return loss.item()


# Script: factor a random (20, 14) data block into two latent-factor
# matrices via gradient descent, then dump losses and factors to CSV.
# NOTE: horiLatents, vertLatents and lr are read as globals by the
# `update` function defined above this chunk — do not rename them.
vecLatents = 10     # size of the shared latent dimension
shape = (20, 14)
# random large block of data
blockData = tensor(numpy.random.random_sample(shape))

# (vecLatents, shape[1]) right factor and (shape[0], vecLatents) left
# factor, so vertLatents @ horiLatents has the same shape as blockData.
horiLatents = \
        nn.Parameter(tensor(numpy.random.random_sample((vecLatents,shape[1]))))
vertLatents = \
        nn.Parameter(tensor(numpy.random.random_sample((shape[0],vecLatents))))

lr = 1e-1
# NOTE(review): nn.Parameter already tracks gradients by default, so
# these two calls look redundant — confirm before removing.
horiLatents.requires_grad_(True)
vertLatents.requires_grad_(True)
lossDict = {}
for t in range(10001):
    lossDict[t] = update(horiLatents, vertLatents)
# index=False drops the iteration number from lossDict.csv — presumably
# intentional since rows are already in iteration order; verify.
pandas.DataFrame.from_dict(lossDict, orient='index').to_csv('lossDict.csv',
                                                            index=False)
pandas.DataFrame(horiLatents.data.tolist()).to_csv('horiLatents.csv',
                                                   index=False)
pandas.DataFrame(vertLatents.data.tolist()).to_csv('vertLatents.csv',
                                                   index=False)
Exemplo n.º 4
0
def run_batch_norm(shift_data=0,
                   momentum=0.9,
                   n_batches=10,
                   batch_size=20,
                   lr=1.e-1,
                   iterations=100,
                   zero_grad=True,
                   verbose=False,
                   epsilon=1.e-7,
                   plot=True,
                   title='batch norm - momentum ',
                   seed=1):
    """Mini-batch gradient descent with batch normalisation.

    Like ``run_batches`` but each batch is normalised (scale ``gamma``,
    shift ``beta``) before the weight update.  With a truthy ``momentum``
    the running mean/variance are seeded from the first batch and carried
    between iterations by ``update_batch_norm``; with ``momentum`` falsy
    they are recomputed from scratch (passed as None) every iteration.

    Parameters
    ----------
    shift_data : offset forwarded to ``generate_data``.
    momentum : running-statistics momentum; falsy disables the running stats.
    n_batches, batch_size : number of mini-batches and samples per batch.
    lr : learning rate forwarded to ``update_batch_norm``.
    iterations : total number of update steps.
    zero_grad : whether ``update_batch_norm`` zeroes gradients after each step.
    verbose : print the running MSE every 10 iterations.
    epsilon : numerical-stability constant for the variance denominator.
    plot : show a ``plot_summary`` figure at the end.
    title : plot title prefix; the momentum value is appended on iteration 0.
    seed : RNG seed forwarded to ``generate_data``.

    Returns
    -------
    np.ndarray
        Per-iteration MSE losses.
    """
    m = n_batches * batch_size
    x, y, w_true = generate_data(size=m,
                                 shift_data=shift_data,
                                 plot=False,
                                 seed=seed)

    w_guess = nn.Parameter(tensor(-1., 1))
    gamma = nn.Parameter(tensor(-1., 1.))
    beta = nn.Parameter(tensor(-1., 1.))

    losses, weights_1, weights_2 = [], [], []

    for t in range(iterations):
        # -- batch limits: cycle through the batches in order --
        batch_idx = t % n_batches
        start = batch_idx * batch_size
        end = start + batch_size
        # ------------------------------------------------------
        x_b = x[start:end].clone()  # clone so the in-place normalisation
        y_b = y[start:end]          # cannot touch x; y is never modified

        if not momentum:
            # No running statistics: recompute mean/var inside
            # update_batch_norm every iteration.
            mu, var = None, None
            if (t == 0):
                title += '0'
        else:
            if (t == 0):
                # Seed the running statistics from the first batch's
                # feature column; later iterations reuse the values
                # returned by update_batch_norm.
                mu = x_b.mean(axis=0)[0]
                var = x_b.var(axis=0)[0]
                title += f'{momentum}'

        loss, w_guess, mu, var = update_batch_norm(x_b,
                                                   y_b,
                                                   w_guess,
                                                   mu,
                                                   var,
                                                   gamma,
                                                   beta,
                                                   momentum=momentum,
                                                   lr=lr,
                                                   zero_grad=zero_grad,
                                                   epsilon=epsilon)
        # Fold gamma/beta/mu/var back into plain weights for plotting.
        w1_guess, w2_guess = _params_to_weights(gamma.detach().numpy(),
                                                beta.detach().numpy(),
                                                w_guess.detach().numpy(),
                                                mu.detach().numpy(),
                                                var.detach().numpy())

        losses.append(float(loss.detach().numpy()))
        weights_1.append(w1_guess)
        weights_2.append(w2_guess)

        # Logical `and` (was bitwise `&`): short-circuits, and checking
        # verbose first skips the modulo entirely in the common quiet case.
        if verbose and t % 10 == 0:
            print(f'MSE {losses[-1]}')

    # Normalise the full data set with the final statistics so the plotted
    # fit matches what the trained parameters actually saw.
    x_ = x.clone()
    x_[:, 0].sub_(mu).div_((var + epsilon)**0.5)
    z_ = x_ * gamma + beta

    if plot:
        plot_summary(x,
                     y, (z_ @ w_guess).detach().numpy(),
                     losses,
                     weights_1,
                     weights_2,
                     shift_data=shift_data,
                     no_weights=False,
                     title=title)

    return np.array(losses)