Example #1
def val(model, criterion, loader, meta, amp):
    model.eval()
    accuracy = []
    mean_loss = 0
    for batch_idx, data in enumerate(loader):
        x = data['image']
        y = data['classes']
        x = x.to('cuda')
        if amp != 'O1':  # apex amp levels use the letter O; O2/O3 expect half-precision inputs
            x = x.half()
        y = y.to('cuda')

        y_hat = model(x)
        probs = torch.sigmoid(y_hat)
        loss = criterion(y_hat, y)

        prediction = probs.flatten() > 0.5
        gt = y.flatten() > 0.5
        accuracy.extend(t2np(prediction) == t2np(gt))
        mean_loss += t2np(loss).mean()

    accuracy = np.mean(accuracy)
    mean_loss = mean_loss / len(loader)  # already a scalar; average over batches

    meta['val']['loss'].append(mean_loss)
    meta['val']['accuracy'].append(accuracy)
    meta['val']['iteration'].append(meta['iteration'][-1])

    print('\nval loss {:^.3f} | val accuracy {:^.3f}'.format(
        meta['val']['loss'][-1], meta['val']['accuracy'][-1]))
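Nearly every example here leans on a small t2np helper that moves a tensor off the GPU and into NumPy. It is defined in the project's utils module; a minimal sketch of the assumed behaviour:

import numpy as np
import torch


def t2np(v):
    # assumed helper: detach from the autograd graph, move to CPU, convert to NumPy
    if isinstance(v, torch.Tensor):
        return v.detach().cpu().numpy()
    return np.asarray(v)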
Example #2
import sys


def val(epoch):
    net.eval()
    val_loss = 0
    for batch_idx, (inputs, loc_targets, cls_targets) in enumerate(valloader):
        inputs = inputs.to('cuda')
        loc_targets = loc_targets.to('cuda')
        cls_targets = cls_targets.to('cuda')

        loc_preds, cls_preds = net(inputs)

        # final argument is the criterion's verbose flag (always True here)
        loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets,
                         batch_idx % 1 == 0)

        loss, inputs, loc_targets, cls_targets = t2np(loss), t2np(
            inputs), t2np(loc_targets), t2np(cls_targets)

        val_loss += loss.mean()
        if batch_idx % 1 == 0:  # always True, so this logs every batch
            print('val_loss: %.4f | avg_loss: %.4f' % (loss.mean(), val_loss /
                                                       (batch_idx + 1)))
            sys.stdout.flush()
    # final summary; reuses the last batch's loss and index
    print('val_loss: %.4f | avg_loss: %.4f' % (loss.mean(), val_loss /
                                               (batch_idx + 1)))

    save_checkpoint(val_loss, epoch, len(valloader))
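save_checkpoint is defined elsewhere in this project. A minimal sketch of what it might do, assuming a module-level net and a running best_loss (both hypothetical here):

best_loss = float('inf')


def save_checkpoint(val_loss, epoch, n_batches):
    # hypothetical sketch: save weights whenever the mean val loss improves
    global best_loss
    avg_loss = val_loss / n_batches
    if avg_loss < best_loss:
        best_loss = avg_loss
        torch.save({'net': net.state_dict(), 'loss': avg_loss, 'epoch': epoch},
                   'ckpt.pth')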
Example #3
def visualize2DGrad():
    plt.figure(1)
    plt.subplot(121)
    plt.imshow(utils.t2np(grad[0, :, :, 0]))  #, clim=(-1, 1))
    plt.title('gradX')
    plt.colorbar()
    plt.subplot(122)
    plt.imshow(utils.t2np(grad[0, :, :, 1]))  #, clim=(-1, 1))
    plt.title('gradY')
    plt.colorbar()
    plt.show()
Example #4
def visualize1D():
    plt.figure(1)
    plt.subplot(121)
    plt.plot(I0[0, :, 0])
    plt.subplot(122)
    plt.plot(utils.t2np(out[0, :, 0]))
    plt.show()
Example #5
import sys
from time import time


def train(epoch):

    print('\nTrain Epoch: %d' % epoch)
    net.train()
    train_loss = 0

    tik = time()
    tok = time()

    for batch_idx, (inputs, loc_targets,
                    cls_targets) in enumerate(trainloader):
        inputs = inputs.to('cuda')
        loc_targets = loc_targets.to('cuda')
        cls_targets = cls_targets.to('cuda')

        optimizer.zero_grad()

        loc_preds, cls_preds = net(inputs)

        loss = criterion(loc_preds, loc_targets, cls_preds, cls_targets,
                         batch_idx % 20 == 0)  # last arg: criterion verbose flag

        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()

        # optional gradient clipping could go here, e.g.
        # nn.utils.clip_grad_norm_(net.parameters(), max_norm=1.0)
        optimizer.step()
        lr_s.step()
        train_loss += t2np(loss).mean()

        if batch_idx % 20 == 0:
            tok = time()
            print(
                'batch: %d/%d | time: %.2f min | train_loss: %.3f | avg_loss: %.4f | lr: %.6f '
                % (batch_idx, len(trainloader),
                   (tok - tik) / 60, t2np(loss).mean(),
                   train_loss / (batch_idx + 1), lr_s.get_lr()[0]))
            sys.stdout.flush()
            tik = time()
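The amp.scale_loss context used above comes from NVIDIA apex mixed-precision training and assumes the model and optimizer were wrapped beforehand. A minimal sketch of that setup (the opt_level choice is an assumption):

from apex import amp

# one-time wrapping before the training loop; 'O1' patches ops to
# run in FP16 where it is numerically safe
net, optimizer = amp.initialize(net, optimizer, opt_level='O1')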
Example #6
def visualize2D():
    plt.figure(1)
    plt.subplot(121)
    plt.imshow(I0[0, :, :, 0], clim=(-1, 1))
    plt.colorbar()
    plt.subplot(122)
    plt.imshow(utils.t2np(out[0, :, :, 0]), clim=(-1, 1))
    plt.colorbar()
    plt.show()
Example #7
def visualize3DGrad():
    plt.figure(1)

    plt.subplot(331)
    plt.imshow(utils.t2np(grad[0, coorCenter, :, :, 0]))  #, clim=(-1, 1))
    plt.title('gradX - xslice')
    plt.colorbar()
    plt.subplot(332)
    plt.imshow(utils.t2np(grad[0, :, coorCenter, :, 0]))  #, clim=(-1, 1))
    plt.title('gradX - yslice')
    plt.colorbar()
    plt.subplot(333)
    plt.imshow(utils.t2np(grad[0, :, :, coorCenter, 0]))  #, clim=(-1, 1))
    plt.title('gradX - zslice')
    plt.colorbar()

    plt.subplot(334)
    plt.imshow(utils.t2np(grad[0, coorCenter, :, :, 1]))  # , clim=(-1, 1))
    plt.title('gradY - xslice')
    plt.colorbar()
    plt.subplot(335)
    plt.imshow(utils.t2np(grad[0, :, coorCenter, :, 1]))  # , clim=(-1, 1))
    plt.title('gradY - yslice')
    plt.colorbar()
    plt.subplot(336)
    plt.imshow(utils.t2np(grad[0, :, :, coorCenter, 1]))  # , clim=(-1, 1))
    plt.title('gradY - zslice')
    plt.colorbar()

    plt.subplot(337)
    plt.imshow(utils.t2np(grad[0, coorCenter, :, :, 2]))  # , clim=(-1, 1))
    plt.title('gradZ - xslice')
    plt.colorbar()
    plt.subplot(338)
    plt.imshow(utils.t2np(grad[0, :, coorCenter, :, 2]))  # , clim=(-1, 1))
    plt.title('gradZ - yslice')
    plt.colorbar()
    plt.subplot(339)
    plt.imshow(utils.t2np(grad[0, :, :, coorCenter, 2]))  # , clim=(-1, 1))
    plt.title('gradZ - zslice')
    plt.colorbar()

    plt.show()
Example #8
def visualize3D():
    plt.figure(1)
    plt.subplot(231)
    plt.imshow(I0[0, coorCenter, :, :, 0], clim=(-1, 1))
    plt.colorbar()
    plt.subplot(232)
    plt.imshow(I0[0, :, coorCenter, :, 0], clim=(-1, 1))
    plt.colorbar()
    plt.subplot(233)
    plt.imshow(I0[0, :, :, coorCenter, 0], clim=(-1, 1))
    plt.colorbar()

    plt.subplot(234)
    plt.imshow(utils.t2np(out[0, coorCenter, :, :, 0]), clim=(-1, 1))
    plt.colorbar()
    plt.subplot(235)
    plt.imshow(utils.t2np(out[0, :, coorCenter, :, 0]), clim=(-1, 1))
    plt.colorbar()
    plt.subplot(236)
    plt.imshow(utils.t2np(out[0, :, :, coorCenter, 0]), clim=(-1, 1))
    plt.colorbar()

    plt.show()
Example #9
def train_epoch(model, criterion, optim, lr_scheduler, loader, meta):
    model.train()
    tik = time()
    for batch_idx, data in enumerate(loader):
        x = data['image']
        y = data['classes']
        x, y = x.to('cuda'), y.to('cuda')

        optim.zero_grad()
        y_hat = model(x)
        probs = torch.sigmoid(y_hat)
        loss = criterion(y_hat, y)

        with amp.scale_loss(loss, optim) as scaled_loss:
            scaled_loss.backward()

        optim.step()
        lr_scheduler.step()

        prediction = probs.flatten() > 0.5
        gt = y.flatten() > 0.5

        accuracy = (t2np(prediction) == t2np(gt)).mean()
        meta['iteration'].append(meta['iteration'][-1] + 1)
        meta['lr'].append(lr_scheduler.get_lr()[0])
        meta['train']['loss'].append(t2np(loss).mean())
        meta['train']['accuracy'].append(accuracy)

        if batch_idx % 100 == 0:
            tok = time()
            print(
                'batch {:5d}/{:^5} | loss {:^.3f} | accuracy {:^.3f} | lr {:^.6f} | time {:^.2f}s'
                .format(batch_idx, len(loader), meta['train']['loss'][-1],
                        meta['train']['accuracy'][-1], meta['lr'][-1],
                        (tok - tik)))
            sys.stdout.flush()
            tik = time()
Example #10
    def forward(self, loc_preds, loc_targets, cls_preds, cls_targets, verbose):
        batch_size, n_anchors = cls_targets.size()
        fg_idx = cls_targets > 0  # positive (foreground) anchors
        n_fg = fg_idx.float().sum()

        fg_mask = fg_idx.unsqueeze(2).expand_as(loc_preds)

        fg_loc_preds = loc_preds[fg_mask]
        fg_loc_targets = loc_targets[fg_mask]
        loc_loss = 0.4 * F.smooth_l1_loss(fg_loc_preds, fg_loc_targets, reduction='none').sum()

        fbg_idx = cls_targets != -1  # keep foreground + background, drop ignored anchors

        fbg_mask = fbg_idx.expand_as(cls_targets)
        fbg_cls_preds = cls_preds[fbg_mask].squeeze()
        fbg_cls_targets = cls_targets[fbg_mask]

        ohe_targets = self._ohe(fbg_cls_targets)  # one-hot encode the class ids
        cls_loss = self.focal_loss(fbg_cls_preds, ohe_targets)

        if verbose:
            np_loc_loss = t2np(loc_loss)
            np_cls_loss = t2np(cls_loss)
            np_n_fg = t2np(n_fg)
            print('loc_loss: %.5f | cls_loss: %.4f' % (np_loc_loss / np_n_fg, np_cls_loss / np_n_fg), end=' | ')

        # normalise by the number of positive anchors (assumed non-zero)
        return (loc_loss + cls_loss) / n_fg
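_ohe and self.focal_loss are defined elsewhere in this class. A plausible sketch consistent with the usage above, assuming a self.n_classes attribute (everything beyond the call signatures is an assumption):

    def _ohe(self, labels):
        # sketch: one-hot encode integer class ids (1..n_classes);
        # background (0) maps to the all-zero row
        ohe = torch.zeros(labels.size(0), self.n_classes, device=labels.device)
        fg = labels > 0
        ohe[fg, (labels[fg] - 1).long()] = 1.0
        return ohe

    def focal_loss(self, logits, targets, alpha=0.25, gamma=2.0):
        # sketch of the standard sigmoid focal loss (Lin et al., 2017):
        # FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t), summed over anchors
        p = torch.sigmoid(logits)
        pt = torch.where(targets > 0, p, 1 - p)
        at = torch.where(targets > 0, torch.full_like(p, alpha),
                         torch.full_like(p, 1 - alpha))
        return (-at * (1 - pt) ** gamma * pt.clamp(min=1e-8).log()).sum()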
Example #11
import numpy as np
import torch
import matplotlib.pyplot as plt

import utils
# `eg` (example generation) and `sf` (smoother factory) are modules from
# the surrounding project, imported elsewhere

dim = 1
sz = np.tile(30, dim)  # size of the desired images: (sz)^dim

params = dict()
params['len_s'] = sz.min() // 6
params['len_l'] = sz.min() // 3

# create a default image pair containing two sample squares
cs = eg.CreateSquares(sz)
I0, I1 = cs.create_image_pair(params)

# create the source and target images as PyTorch tensors
ISource = torch.from_numpy(I0)
ITarget = torch.from_numpy(I1)

# spacing so that everything lives in [0,1]^dim for now
spacing = 1. / (sz - 1)
print('Spacing = ' + str(spacing))

s = sf.SmootherFactory(sz, spacing).create_smoother('diffusion', {'iter': 10})
r = s.smooth_scalar_field(ISource)

plt.figure(1)
plt.plot(utils.t2np(ISource))
plt.plot(utils.t2np(r))

plt.show()
Example #12
start = time.time()

for it in range(100):  # avoid shadowing the builtin iter()

    # the closure re-evaluates the model and returns the loss; optimizers
    # such as LBFGS call it several times per step
    def closure():
        optimizer.zero_grad()
        # Forward pass: Compute predicted y by passing x to the model
        IWarped = model(ISource)
        # Compute loss
        loss = criterion(IWarped, ITarget)
        loss.backward()
        return loss

    optimizer.step(closure)
    cIWarped = model(ISource)

    if it % 1 == 0:  # always True: report energies every iteration
        energy, similarityEnergy, regEnergy = criterion.get_energy(
            cIWarped, ITarget)
        print(
            'Iter {iter}: E={energy}, similarityE={similarityE}, regE={regE}'.
            format(iter=it,
                   energy=utils.t2np(energy),
                   similarityE=utils.t2np(similarityEnergy),
                   regE=utils.t2np(regEnergy)))

    if it % 10 == 0:
        vizReg.showCurrentImages(it, ISource, ITarget, cIWarped)

print('time:', time.time() - start)
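The optimizer.step(closure) form implies a closure-based optimizer such as torch.optim.LBFGS; a minimal sketch of the assumed construction (the hyperparameter values are placeholders):

import torch

optimizer = torch.optim.LBFGS(model.parameters(), lr=1.0, history_size=5)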
Example #13
# now create the grids
xvals = np.array(np.linspace(-1, 1, sz))
yvals = np.array(np.linspace(-1, 1, sz))

YY, XX = np.meshgrid(xvals, yvals)

grid = np.zeros([1, sz, sz, 2], dtype='float32')
grid[0, :, :, 0] = XX + 0.2
grid[0, :, :, 1] = YY

ISource = torch.from_numpy(I0)
gridV = torch.from_numpy(grid)
gridV.requires_grad_(True)  # so gridV.grad is populated by backward() below

# s = STN(layout='BCHW')
s = STN()
start = time.time()
out = s(ISource, gridV)

plt.figure(1)
plt.subplot(121)
plt.imshow(I0[0, :, :, 0])
plt.subplot(122)
plt.imshow(utils.t2np(out[0, :, :, 0]))

print(out.size(), 'time:', time.time() - start)
start = time.time()
out.backward(gridV.data)  # backprop an arbitrary upstream gradient through the sampler
print(gridV.grad.size(), 'time:', time.time() - start)
Example #14
def visualize1DGrad():
    plt.figure(1)
    plt.plot(utils.t2np(grad[0, :]))
    plt.title('grad')
    plt.show()