Example #1
    def data(self):
        # Stack each list of Images into one (lists, imgs, C, H, W) tensor.
        img_data = torch.stack([
            torch.stack([img.data for img in img_list])
            for img_list in self.img_lists
        ])
        return tensor(img_data)
Example #2
def edge_loss(input, target):
    # 3x3 edge-detecting kernel, moved to the GPU and broadcast to
    # conv2d's (out_channels, in_channels, kH, kW) layout
    k = tensor([
        [0., -5 / 3, 1],
        [-5 / 3, -5 / 3, 1],
        [1., 1, 1],
    ]).cuda().expand(1, 1, 3, 3) / 6
    return 100 * F.mse_loss(F.conv2d(input, k), F.conv2d(target, k))
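A minimal usage sketch (hypothetical shapes; assumes a CUDA device, since the kernel is moved with .cuda(), and that tensor comes from torch or fastai's re-export):

import torch

pred = torch.rand(1, 1, 64, 64, device='cuda')   # hypothetical prediction batch
truth = torch.rand(1, 1, 64, 64, device='cuda')  # hypothetical target batch
loss = edge_loss(pred, truth)                    # scalar tensor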
Example #3
    def open(self, fn):
        # Load a saved (n_lists, n_imgs, H, W) array and wrap every 2-D slice
        # as a one-channel Image.
        img_data = np.load(fn)
        img_lists = []
        for j in range(img_data.shape[0]):
            imgs = []
            for i in range(img_data.shape[1]):
                imgs.append(Image(tensor(img_data[j, i, :, :][None])))
            img_lists.append(imgs)
        return TransformableLists(img_lists)
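A hypothetical round trip, assuming the file was written with np.save in (n_lists, n_imgs, H, W) layout; item_list stands in for whatever ItemList subclass defines open above:

import numpy as np

arr = np.random.rand(2, 3, 28, 28).astype('float32')
np.save('stack.npy', arr)
tlists = item_list.open('stack.npy')  # TransformableLists: 2 lists of 3 Images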
Example #4
def run_batches(shift_data=0,
                n_batches=20,
                batch_size=20,
                lr=1.e-1,
                iterations=200,
                zero_grad=True,
                verbose=False,
                plot=True,
                seed=1):

    m = n_batches * batch_size
    x, y, w_true = generate_data(size=m,
                                 shift_data=shift_data,
                                 plot=False,
                                 seed=seed)

    w_guess = nn.Parameter(tensor(-1., 1))

    losses, weights_1, weights_2 = [], [], []

    batch_idx = -1
    for t in range(iterations):
        # -- batch limits --
        batch_idx += 1
        batch_idx = batch_idx % n_batches
        start = batch_idx * batch_size
        end = start + batch_size
        # ------------------
        x_b = x[start:end].clone()  # clone so the update cannot overwrite x
        y_b = y[start:end]  # no need to clone this

        loss, w_guess = update_batch(x_b,
                                     y_b,
                                     w_guess,
                                     lr=lr,
                                     zero_grad=zero_grad)
        losses.append(float(loss.detach().numpy()))
        weights_1.append(w_guess.detach().numpy()[0])
        weights_2.append(w_guess.detach().numpy()[1])

        if verbose and t % (iterations // 10) == 0:
            print(f'MSE {losses[-1]}')

    if plot:
        plot_summary(x,
                     y,
                     x @ w_guess.detach().numpy(),
                     losses,
                     weights_1,
                     weights_2,
                     shift_data=shift_data,
                     title='mini batches')

    return np.array(losses)
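update_batch itself is not shown on this page; a minimal sketch consistent with the call sites above (one manual SGD step on MSE; a hypothetical reconstruction, not the original helper):

import torch
import torch.nn.functional as F

def update_batch(x_b, y_b, w_guess, lr=1.e-1, zero_grad=True):
    loss = F.mse_loss(x_b @ w_guess, y_b)  # forward pass on this mini-batch
    loss.backward()
    with torch.no_grad():
        w_guess.sub_(lr * w_guess.grad)    # manual gradient-descent step
        if zero_grad:
            w_guess.grad.zero_()           # otherwise gradients accumulate
    return loss, w_guess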
Example #5
def generate_data(size=200, shift_data=0, plot=True, seed=1):
    if seed:
        torch.manual_seed(seed)

    x = torch.ones(size, 2)
    x[:, 0].uniform_(-1., 1)
    x[:, 0].add_(shift_data)

    # w1_true and w2_true are module-level ground-truth weights
    w = tensor(w1_true, w2_true)

    noise = (torch.rand(size) - 0.5) * (
        shift_data**0.5 + 1
    )  # subtract 0.5 because torch.rand is centered around 0.5
    y = (x @ w) + noise

    if plot:
        plt.scatter(x[:, 0], y)
        #plt.plot([0, 1], [0, 1])

    return x, y, w
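w1_true and w2_true are read from module scope; a hypothetical setup before calling (the values are placeholders):

w1_true, w2_true = 3., 2.  # hypothetical ground-truth weights
x, y, w = generate_data(size=100, shift_data=0, plot=False)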
Example #6
def run_batch(shift_data=0,
              size=400,
              lr=1.e-1,
              iterations=200,
              zero_grad=True,
              verbose=False,
              plot=True,
              seed=1):

    x, y, w_true = generate_data(size=size,
                                 shift_data=shift_data,
                                 plot=False,
                                 seed=seed)

    w_guess = nn.Parameter(tensor(-1., 1))

    losses, weights_1, weights_2 = [], [], []
    for t in range(iterations):
        loss, w_guess = update_batch(x, y, w_guess, lr=lr, zero_grad=zero_grad)
        losses.append(float(loss.detach().numpy()))
        weights_1.append(w_guess.detach().numpy()[0])
        weights_2.append(w_guess.detach().numpy()[1])

        if verbose and t % (iterations // 10) == 0:
            print(f'MSE {losses[-1]}')

    if plot:
        plot_summary(x,
                     y,
                     x @ w_guess.detach().numpy(),
                     losses,
                     weights_1,
                     weights_2,
                     shift_data=shift_data,
                     title='one batch')

    return np.array(losses)
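The full-batch and mini-batch variants can then be compared directly, e.g.:

losses_full = run_batch(shift_data=5, plot=False)
losses_mini = run_batches(shift_data=5, plot=False)
print(losses_full[-1], losses_mini[-1])  # final MSE of each regime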
Example #7
        }
        df1 = pandas.DataFrame.from_dict(dict1, orient='index')
        df1.columns = ['grad', 'is_leaf', 'requires_grad']
        #print(df1)
    with torch.no_grad():
        vertLatents.sub_(lr * vertLatents.grad)
        vertLatents.grad.zero_()
        horiLatents.sub_(lr * horiLatents.grad)
        horiLatents.grad.zero_()
    return loss.item()


vecLatents = 10
shape = (20, 14)
# random large block of data
blockData = tensor(numpy.random.random_sample(shape))

horiLatents = \
        nn.Parameter(tensor(numpy.random.random_sample((vecLatents,shape[1]))))
vertLatents = \
        nn.Parameter(tensor(numpy.random.random_sample((shape[0],vecLatents))))

lr = 1e-1
horiLatents.requires_grad_(True)
vertLatents.requires_grad_(True)
lossDict = {}
for t in range(10001):
    lossDict[t] = update(horiLatents, vertLatents)
pandas.DataFrame.from_dict(lossDict, orient='index').to_csv('lossDict.csv',
                                                            index=False)
pandas.DataFrame(horiLatents.data.tolist()).to_csv('horiLatents.csv',
                                                   index=False)
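The head of update() is cut off above; a self-contained sketch consistent with the visible tail (a hypothetical reconstruction, with the debug DataFrame omitted):

import torch

def update(horiLatents, vertLatents):
    y_hat = vertLatents @ horiLatents          # (20, 14) low-rank reconstruction
    loss = ((y_hat - blockData) ** 2).mean()   # assumed MSE against blockData
    loss.backward()
    with torch.no_grad():
        vertLatents.sub_(lr * vertLatents.grad)   # lr is module-level, as above
        vertLatents.grad.zero_()
        horiLatents.sub_(lr * horiLatents.grad)
        horiLatents.grad.zero_()
    return loss.item()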
Example #8
opioidIn = pandas.read_csv('OpioidMNDataByCounty.csv')
opioidInPivot = opioidIn.pivot(index='County',
                               columns='year',
                               values='TotalPrescripts').reset_index()
opioidInPivot[2017] = numpy.where(opioidInPivot.County == 'ST. LOUIS', 54.4,
                                  opioidInPivot[2017])
colYears = opioidInPivot.columns[(opioidInPivot.columns != 'County')]
opioidInPivot.dropna(axis=0, inplace=True)
opioidInPivot.drop(
    opioidInPivot.index[opioidInPivot.County == 'SAINT LOUIS'].tolist(),
    inplace=True)
opioidInPivot.to_csv('OpioidMNDataByCountyPivot.csv', sep='\t', index=False)
colsNotCounty = opioidInPivot.columns.tolist()
colsNotCounty.remove('County')
opioidTensor = tensor(opioidInPivot[colsNotCounty].values)

shape = opioidTensor.shape
vecLatents = 15

###########################################################################

countyVec = \
        nn.Parameter(tensor(numpy.random.random_sample((shape[0],vecLatents))))
yearVec = \
        nn.Parameter(tensor(numpy.random.random_sample((vecLatents,shape[1]))))

lr = 1e-1
countyVec.requires_grad_(True)
yearVec.requires_grad_(True)
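The excerpt stops before the optimization loop; presumably it mirrors Example #7, e.g. this hypothetical continuation:

for t in range(10001):
    y_hat = countyVec @ yearVec                  # county-by-year reconstruction
    loss = ((y_hat - opioidTensor) ** 2).mean()  # assumed MSE objective
    loss.backward()
    with torch.no_grad():
        countyVec.sub_(lr * countyVec.grad)
        countyVec.grad.zero_()
        yearVec.sub_(lr * yearVec.grad)
        yearVec.grad.zero_()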
Example #9
    def open(self, fn):
        # Load a saved 2-D array and wrap it as a one-channel Image.
        img_data = np.load(fn)
        return Image(tensor(img_data[None]))
Example #10
            'horiLatents':
            [horiLatents.grad, horiLatents.is_leaf, horiLatents.requires_grad],
            'y_hat': [y_hat.grad, y_hat.is_leaf, y_hat.requires_grad]
        }
        df1 = pandas.DataFrame.from_dict(dict1, orient='index')
        df1.columns = ['grad', 'is_leaf', 'requires_grad']
        #print(df1)
    with torch.no_grad():
        y_hat.sub_(lr * y_hat.grad)
        y_hat.grad.zero_()


vecLatents = 10
shape = (20, 14)
# random large block of data
blockData = tensor(numpy.random.random_sample(shape))

horiLatents = \
        nn.Parameter(tensor(numpy.random.random_sample((vecLatents,shape[1]))))
vertLatents = \
        nn.Parameter(tensor(numpy.random.random_sample((shape[0],vecLatents))))
'''
a = nn.Parameter(tensor(hypothesis(vertLatents,horiLatents)))
print('a.requires_grad      ------ ',a.requires_grad)
'''

lr = 1e-1
y_hat = tensor(hypothesis(vertLatents, horiLatents))
y_hat.requires_grad_(True)
for t in range(10001):
    update(y_hat)
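hypothesis() is not defined on this page; given the shapes above it is presumably the low-rank product (a hypothetical sketch):

def hypothesis(vertLatents, horiLatents):
    # (20, vecLatents) @ (vecLatents, 14) -> (20, 14)
    return vertLatents @ horiLatents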
Example #11
def run_batch_norm(shift_data=0,
                   momentum=0.9,
                   n_batches=10,
                   batch_size=20,
                   lr=1.e-1,
                   iterations=100,
                   zero_grad=True,
                   verbose=False,
                   epsilon=1.e-7,
                   plot=True,
                   title='batch norm - momentum ',
                   seed=1):

    m = n_batches * batch_size
    x, y, w_true = generate_data(size=m,
                                 shift_data=shift_data,
                                 plot=False,
                                 seed=seed)

    w_guess = nn.Parameter(tensor(-1., 1))
    gamma = nn.Parameter(tensor(-1., 1.))
    beta = nn.Parameter(tensor(-1., 1.))

    losses, weights_1, weights_2 = [], [], []

    batch_idx = -1
    for t in range(iterations):  #(batch_size * n_batches // 2):
        # -- batch limits --
        batch_idx += 1
        batch_idx = batch_idx % n_batches
        start = batch_idx * batch_size
        end = start + batch_size
        # ------------------
        x_b = x[start:end].clone()  # clone so the update cannot overwrite x
        y_b = y[start:end]  # no need to clone this

        if not momentum:
            mu, var = None, None
            if (t == 0):
                title += '0'
        else:
            if (t == 0):
                mu = x_b.mean(axis=0)[0]
                var = x_b.var(axis=0)[0]
                title += f'{momentum}'

        loss, w_guess, mu, var = update_batch_norm(x_b,
                                                   y_b,
                                                   w_guess,
                                                   mu,
                                                   var,
                                                   gamma,
                                                   beta,
                                                   momentum=momentum,
                                                   lr=lr,
                                                   zero_grad=zero_grad,
                                                   epsilon=epsilon)
        w1_guess, w2_guess = _params_to_weights(gamma.detach().numpy(),
                                                beta.detach().numpy(),
                                                w_guess.detach().numpy(),
                                                mu.detach().numpy(),
                                                var.detach().numpy())

        losses.append(float(loss.detach().numpy()))
        weights_1.append(w1_guess)
        weights_2.append(w2_guess)
        #print(gamma.detach().numpy(), beta.detach().numpy(), w_guess.detach().numpy(), mu.detach().numpy(), var.detach().numpy())
        if verbose and t % 10 == 0:
            print(f'MSE {losses[-1]}')

    x_ = x.clone()
    x_[:, 0].sub_(mu).div_((var + epsilon)**0.5)
    z_ = x_ * gamma + beta

    if plot:
        plot_summary(x,
                     y, (z_ @ w_guess).detach().numpy(),
                     losses,
                     weights_1,
                     weights_2,
                     shift_data=shift_data,
                     no_weights=False,
                     title=title)

    return np.array(losses)
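update_batch_norm and _params_to_weights are not shown; the latter presumably folds the batch-norm parameters back into equivalent plain weights, matching the z_ = x_ * gamma + beta reconstruction above (a hypothetical sketch):

def _params_to_weights(gamma, beta, w, mu, var, epsilon=1.e-7):
    # y = z @ w with z = [g0 * (x0 - mu) / s + b0, g1 + b1], s = sqrt(var + eps),
    # rearranged into y = w1_eff * x0 + w2_eff
    s = (var + epsilon) ** 0.5
    w1_eff = w[0] * gamma[0] / s
    w2_eff = w[0] * (beta[0] - gamma[0] * mu / s) + w[1] * (gamma[1] + beta[1])
    return w1_eff, w2_eff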