Example 1
def extract_patch_from_points(heatmap, points, patch_size=5):
    """
    Extract patch_size x patch_size patches from a heatmap, centered on
    the given (x, y) points. Works in numpy; torch tensors are converted.
    """
    import numpy as np
    import torch

    if isinstance(heatmap, torch.Tensor):
        from utils.utils import toNumpy
        heatmap = toNumpy(heatmap)
    heatmap = heatmap.squeeze()  # [H, W]
    # Pad so patches centered on border points keep the full size
    pad_size = patch_size // 2
    heatmap = np.pad(heatmap, pad_size, 'constant')
    # Crop one patch per point; after padding, the slice starting at
    # (y, x) is centered on the original point
    patches = []
    ext = lambda img, pnt, wid: img[pnt[1]:pnt[1] + wid, pnt[0]:pnt[0] + wid]
    for i in range(points.shape[0]):
        patch = ext(heatmap, points[i, :].astype(int), patch_size)
        patches.append(patch)
    return patches
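A minimal usage sketch on a synthetic heatmap (the array sizes and point coordinates here are illustrative, not project data); the constant padding is what lets patches centered on border points keep the full 5x5 size:

import numpy as np

# Synthetic 16x16 heatmap and three (x, y) keypoints, including corners.
heatmap = np.random.rand(16, 16).astype(np.float32)
points = np.array([[0, 0], [7, 3], [15, 15]])

patches = extract_patch_from_points(heatmap, points, patch_size=5)
print(len(patches), patches[0].shape)  # 3 (5, 5)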
Example 2
def testMSE(args, model, test_loader, tstep=100, test_every=2):
    '''
    Tests the base deterministic model on the full testing set
    Args:
        args (argparse): object with program's arguments
        model (PyTorch model): DenseED model to be tested
        test_loader (dataloader): dataloader with test cases (use createTestingLoader)
        tstep (int): number of timesteps to predict for
        test_every (int): time-step interval to test (must match simulator), default = 2
    Returns:
        u_out (torch.Tensor): [d x t x n] predicted quantities
        u_target (torch.Tensor): [d x t x n] target/simulated values
        mse_error (torch.Tensor): [d x t] time dependent mean squared error
        ese_error (torch.Tensor): [d x t] time dependent energy squared error
    '''
    model.eval()
    testIdx = tstep // test_every + 1
    mb_size = int(len(test_loader.dataset) / len(test_loader))
    u_out = torch.zeros(len(test_loader.dataset), tstep + 1, 2, args.nel,
                        args.nel)
    u_target = torch.zeros(len(test_loader.dataset), testIdx, 2, args.nel,
                           args.nel)

    for bidx, (input0, uTarget0) in enumerate(test_loader):

        u_out[bidx * mb_size:(bidx + 1) * mb_size, 0] = input0.cpu()
        u_target[bidx * mb_size:(bidx + 1) *
                 mb_size] = uTarget0[:, :testIdx].cpu()

        # Expand input to match model in channels
        dims = torch.ones(len(input0.shape))
        dims[1] = args.nic
        input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(
            args.device)

        # Auto-regress
        for tidx in range(tstep):
            uPred = model(input[:, -2 * args.nic:, :, :])
            u_out[bidx * mb_size:(bidx + 1) * mb_size,
                  tidx + 1] = uPred.detach().cpu()

            input = input[:, -2 * int(args.nic - 1):, :].detach()
            input0 = uPred.detach()
            input = torch.cat([input, input0], dim=1)

    # Reshape and compress last three dims for easy mean calculations
    u_out = u_out.view(len(test_loader.dataset), tstep + 1,
                       -1)[:, ::test_every]
    u_target = u_target.view(len(test_loader.dataset), testIdx, -1)
    # Calc MSE and ESE errors of all collocation points and x/y components
    mse_error = torch.mean(torch.pow(u_out - u_target, 2), dim=-1)
    ese_error = torch.pow(
        torch.sum(torch.pow(u_out, 2) / 2.0, dim=-1) / (args.nel**2) -
        torch.sum(torch.pow(u_target, 2) / 2.0, dim=-1) / (args.nel**2), 2)

    return u_out, u_target, mse_error, ese_error
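A hypothetical driver for testMSE (a sketch only: args, model, and test_loader are assumed to come from the surrounding project, e.g. a trained DenseED model and createTestingLoader):

import torch

u_out, u_target, mse_error, ese_error = testMSE(
    args, model, test_loader, tstep=100, test_every=2)

# Average over the test cases to get one error curve per saved time-step.
print('MSE per step:', torch.mean(mse_error, dim=0))
print('ESE per step:', torch.mean(ese_error, dim=0))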
Example 3
def testSample(args,
               swag_nn,
               test_loader,
               tstep=100,
               n_samples=10,
               test_every=2):
    '''
    Tests samples of the Bayesian SWAG model
    Args:
        args (argparse): object with program's arguments
        swag_nn (PyTorch model): SWAG DenseED model to sample and test
        test_loader (dataloader): dataloader with test cases (use createTestingLoader)
        tstep (int): number of timesteps to predict for
        n_samples (int): number of model samples to draw
        test_every (int): time-step interval to test (must match simulator), default = 2
    Returns:
        u_out (torch.Tensor): [d x n_samples x (tstep//test_every + 1) x 2 x nel x nel] predicted quantities of each sample
        u_target (torch.Tensor): [d x (tstep//test_every + 1) x 2 x nel x nel] respective target values loaded from simulator
    '''
    mb_size = int(len(test_loader.dataset) / len(test_loader))
    u_out = torch.zeros(len(test_loader.dataset), n_samples,
                        tstep // test_every + 1, 2, args.nel, args.nel)
    u_target = torch.zeros(len(test_loader.dataset), tstep // test_every + 1,
                           2, args.nel, args.nel)

    for i in range(n_samples):
        print('Executing model sample {:d}'.format(i))
        model = swag_nn.sample(diagCov=True)  # diagonal covariance approx.
        model.eval()

        for bidx, (input0, uTarget0) in enumerate(test_loader):
            # Expand input to match model in channels
            dims = torch.ones(len(input0.shape))
            dims[1] = args.nic
            input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(
                args.device)

            if (i == 0):  # Save target data
                u_target[bidx * mb_size:(bidx + 1) *
                         mb_size] = uTarget0[:, :(tstep // test_every + 1)]
            u_out[bidx * mb_size:(bidx + 1) * mb_size, i, 0, :, :, :] = input0
            # Auto-regress
            for t_idx in range(tstep):
                uPred = model(input[:, -2 * args.nic:, :])
                if ((t_idx + 1) % test_every == 0):
                    u_out[bidx * mb_size:(bidx + 1) * mb_size, i,
                          t_idx // test_every + 1, :, :, :] = uPred

                input = input[:, -2 * int(args.nic - 1):, :].detach()
                input0 = uPred.detach()
                input = torch.cat([input, input0], dim=1)

    return u_out, u_target
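A sketch of how the returned ensemble might be summarized (swag_nn and test_loader are again assumed project objects); dim 1 of u_out indexes the SWAG samples, so reducing over it yields a predictive mean and a simple uncertainty estimate:

import torch

u_out, u_target = testSample(args, swag_nn, test_loader,
                             tstep=100, n_samples=10)

u_mean = torch.mean(u_out, dim=1)  # [d x t x 2 x nel x nel]
u_std = torch.std(u_out, dim=1)    # spread across model samples
print('mean-field MSE:', torch.mean((u_mean - u_target) ** 2).item())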
Example 4
def testSample(args, swag_nn, test_loader, tstep=100, n_samples=10):
    '''
    Tests samples of the SWAG model on the first test mini-batch in the testing loader
    Args:
        args (argparse): object with program's arguments
        swag_nn (PyTorch model): SWAG DenseED model to sample and test
        test_loader (dataloader): dataloader with test cases with no shuffle (use createTestingLoader)
        tstep (int): number of timesteps to predict for
        n_samples (int): number of model samples to draw, default 10
    Returns:
        u_out (torch.Tensor): [mb x n_samp x t x n] predicted quantities
        u_target (torch.Tensor): [mb x t x n] target/simulated values
    '''

    mb_size = int(len(test_loader.dataset) / len(test_loader))
    u_out = torch.zeros(mb_size, n_samples, tstep + 1, 2, args.nel, args.nel)

    for i in range(n_samples):
        print('Executing model sample {:d}'.format(i))
        model = swag_nn.sample(diagCov=False)
        model.eval()

        for batch_idx, (input0, uTarget0) in enumerate(test_loader):
            # Expand input to match model in channels
            dims = torch.ones(len(input0.shape))
            dims[1] = args.nic
            input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(
                args.device)

            u_target = uTarget0
            u_out[:, i, 0, :, :, :] = input0
            # Auto-regress
            for t_idx in range(tstep):
                uPred = model(input[:, -2 * args.nic:, :])

                u_out[:, i, t_idx + 1, :, :, :] = uPred

                input = input[:, -2 * int(args.nic - 1):, :].detach()
                input0 = uPred.detach()
                input = torch.cat([input, input0], dim=1)

            # Only do the first mini-batch
            break

    return u_out, u_target
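Since this variant keeps every time-step of the first mini-batch, one hedged way to use it is to look at how much the SWAG samples disagree at the end of the auto-regression (project objects assumed as above):

import torch

u_out, u_target = testSample(args, swag_nn, test_loader,
                             tstep=50, n_samples=5)

# u_out: [mb x n_samp x t x 2 x nel x nel]; the std over dim 1 at the
# final step shows where the model samples diverge most.
spread = torch.std(u_out[:, :, -1], dim=1)
print('mean predictive std at final step:', spread.mean().item())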
Example 5
def test(args, model, test_loader, tstep=100, test_every=2):
    '''
    Tests the deterministic model
    Args:
        args (argparse): object with program's arguments
        model (PyTorch model): DenseED model to be tested
        test_loader (dataloader): dataloader with test cases (use createTestingLoader)
        tstep (int): number of timesteps to predict for
        test_every (int): time-step interval to test (must match simulator), default = 2
    Returns:
        u_out (torch.Tensor): [d x (tstep//test_every + 1) x 2 x nel x nel] predicted quantities
        u_target (torch.Tensor): [d x (tstep//test_every + 1) x 2 x nel x nel] respective target values loaded from simulator
    '''
    model.eval()
    mb_size = int(len(test_loader.dataset) / len(test_loader))
    u_out = torch.zeros(len(test_loader.dataset), tstep // test_every + 1, 2,
                        args.nel, args.nel)
    u_target = torch.zeros(len(test_loader.dataset), tstep // test_every + 1,
                           2, args.nel, args.nel)

    for bidx, (input0, uTarget0) in enumerate(test_loader):
        # Expand input to match model in channels
        dims = torch.ones(len(input0.shape))
        dims[1] = args.nic
        input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(
            args.device)

        u_out[bidx * mb_size:(bidx + 1) * mb_size, 0] = input0
        u_target[bidx * mb_size:(bidx + 1) *
                 mb_size] = uTarget0[:, :(tstep // test_every + 1)].cpu()

        # Auto-regress
        for t_idx in range(tstep):
            uPred = model(input[:, -2 * args.nic:, :, :])
            if ((t_idx + 1) % test_every == 0):
                u_out[bidx * mb_size:(bidx + 1) * mb_size,
                      (t_idx + 1) // test_every, :, :, :] = uPred

            input = input[:, -2 * int(args.nic - 1):, :].detach()
            input0 = uPred.detach()
            input = torch.cat([input, input0], dim=1)

    return u_out, u_target
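Because u_out and u_target come back aligned on the same test_every grid, a relative L2 error per test case is a one-liner away (a sketch; project objects assumed as before):

import torch

u_out, u_target = test(args, model, test_loader, tstep=100, test_every=2)

num = torch.sum((u_out - u_target) ** 2, dim=(1, 2, 3, 4))
den = torch.sum(u_target ** 2, dim=(1, 2, 3, 4))
print('relative L2 per case:', torch.sqrt(num / den))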
Example 6
def test(args, model, test_loader, tstep=100):
    '''
    Tests the base deterministic model on a single mini-batch
    Args:
        args (argparse): object with program's arguments
        model (PyTorch model): DenseED model to be tested
        test_loader (dataloader): dataloader with test cases (use createTestingLoader)
        tstep (int): number of timesteps to predict for
    Returns:
        u_out (torch.Tensor): [mb x t x n] predicted quantities
        u_target (torch.Tensor): [mb x t x n] target/simulated values
    '''
    model.eval()
    mb_size = int(len(test_loader.dataset) / len(test_loader))
    u_out = torch.zeros(mb_size, tstep + 1, 2, args.nel, args.nel).to(args.device)

    for bidx, (input0, uTarget0) in enumerate(test_loader):
        # Expand input to match model in channels
        dims = torch.ones(len(input0.shape))
        dims[1] = args.nic
        input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(args.device)

        if bidx == 0:
            u_out[:, 0, :, :, :] = input0
            u_target = uTarget0
        # Auto-regress
        for t_idx in range(tstep):
            uPred = model(input[:, -2 * args.nic:, :, :])
            if bidx == 0:
                u_out[:, t_idx + 1, :, :, :] = uPred

            input = input[:, -2 * int(args.nic - 1):, :].detach()
            input0 = uPred.detach()
            input = torch.cat([input, input0], dim=1)
        break
        
    return u_out, u_target
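The auto-regressive loop above maintains a sliding window of the last nic states (two channels each): every step drops the oldest state and appends the newest prediction. A self-contained sketch of just that windowing logic, with toy sizes:

import torch

nic, nel = 4, 8
state = torch.zeros(1, 2, nel, nel)
window = state.repeat(1, nic, 1, 1)  # [1, 2*nic, nel, nel]

for t in range(3):
    pred = torch.full((1, 2, nel, nel), float(t + 1))  # stand-in for model()
    # Keep the newest nic-1 states, then append the new prediction.
    window = torch.cat([window[:, -2 * (nic - 1):], pred], dim=1)

assert window.shape[1] == 2 * nic
print(window[0, :, 0, 0])  # channel pairs read oldest -> newest: 0, 1, 2, 3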
Example 7
    def run(self, inp, onlyHeatmap=False, train=True):
        """ Process a numpy image to extract points and descriptors.
        Input
          img - HxW tensor float32 input image in range [0,1].
        Output
          corners - 3xN numpy array with corners [x_i, y_i, confidence_i]^T.
          desc - 256xN numpy array of corresponding unit normalized descriptors.
          heatmap - HxW numpy heatmap in range [0,1] of point confidences.
          """
        inp = inp.to(self.device)
        batch_size, H, W = inp.shape[0], inp.shape[2], inp.shape[3]
        if train:
            outs = self.net.forward(inp)
            semi, coarse_desc = outs['semi'], outs['desc']
        else:
            # Forward pass of network without tracking gradients
            with torch.no_grad():
                outs = self.net.forward(inp)
                semi, coarse_desc = outs['semi'], outs['desc']

        # Flatten the detector head output into a full-resolution heatmap
        from utils.utils import flattenDetection
        heatmap = flattenDetection(semi, tensor=True)
        self.heatmap = heatmap

        if onlyHeatmap:
            return heatmap

        # Extract keypoints from each heatmap in the batch
        pts = [
            self.getPtsFromHeatmap(heatmap[i, :, :, :].cpu().detach().numpy())
            for i in range(batch_size)
        ]
        self.pts = pts

        if self.subpixel:
            # NOTE: assumes the network output also exposes a sub-pixel
            # residual head at index 2 (tuple-style output)
            labels_res = outs[2]
            self.pts_subpixel = [
                self.subpixel_predict(toNumpy(labels_res[i, ...]), pts[i])
                for i in range(batch_size)
            ]
        # pts: list of length batch_size, each an array of points
        # (x, y, probability)

        # Interpolate the coarse descriptors to full resolution:
        # coarse_desc [B, 256, Hc, Wc] -> dense_desc [B, 256, H, W]
        dense_desc = nn.functional.interpolate(coarse_desc,
                                               scale_factor=(self.cell,
                                                             self.cell),
                                               mode='bilinear')

        # Normalize descriptors to unit L2 norm along the channel dimension
        def norm_desc(desc):
            dn = torch.norm(desc, p=2, dim=1)  # per-pixel descriptor norm
            desc = desc.div(torch.unsqueeze(dn, 1))  # divide to normalize
            return desc

        dense_desc = norm_desc(dense_desc)

        # Sample the dense descriptor map at each detected keypoint
        dense_desc_cpu = dense_desc.cpu().detach().numpy()
        pts_desc = [
            dense_desc_cpu[i, :, pts[i][1, :].astype(int),
                           pts[i][0, :].astype(int)].transpose()
            for i in range(len(pts))
        ]

        if self.subpixel:
            return self.pts_subpixel, pts_desc, dense_desc, heatmap
        return pts, pts_desc, dense_desc, heatmap
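The descriptor path above can be checked in isolation: bilinear interpolation by the cell factor followed by the same per-pixel L2 normalization as norm_desc. A self-contained sketch with toy sizes (cell = 8 assumed, as in SuperPoint-style models):

import torch
import torch.nn as nn

coarse_desc = torch.randn(1, 256, 4, 4)  # [B, 256, Hc, Wc]
dense_desc = nn.functional.interpolate(
    coarse_desc, scale_factor=(8, 8), mode='bilinear', align_corners=False)

dn = torch.norm(dense_desc, p=2, dim=1)       # per-pixel norm
dense_desc = dense_desc.div(dn.unsqueeze(1))  # unit-normalize channels
print(dense_desc.shape)                    # torch.Size([1, 256, 32, 32])
print(torch.norm(dense_desc[0, :, 0, 0]))  # ~1.0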
Example 8
def testBayesianMSE(args,
                    swag_nn,
                    test_loader,
                    tstep=100,
                    n_samples=10,
                    test_every=2):
    '''
    Tests samples of the model using SWAG and calculates error values
    Args:
        args (argparse): object with program's arguments
        swag_nn (PyTorch model): SWAG DenseED model to sample and test
        test_loader (dataloader): dataloader with test cases with no shuffle (use createTestingLoader)
        tstep (int): number of timesteps to predict for
        n_samples (int): number of model samples to test
        test_every (int): time-step interval to test (must match simulator), default = 2
    Returns:
        u_out (torch.Tensor): [d x nsamp x t x n] predicted quantities
        u_target (torch.Tensor): [d x t x n] target/simulated values
        mse_error (torch.Tensor): [d x t] time dependent mean squared error using the expected prediction
        ese_error (torch.Tensor): [d x t] time dependent energy squared error using the expected prediction
    '''
    testIdx = tstep // test_every + 1
    mb_size = int(len(test_loader.dataset) / len(test_loader))
    u_out = torch.zeros(len(test_loader.dataset), n_samples, tstep + 1, 2,
                        args.nel, args.nel)
    u_target = torch.zeros(len(test_loader.dataset), testIdx, 2, args.nel,
                           args.nel)

    for i in range(n_samples):
        print('Executing model sample {:d}'.format(i))
        model = swag_nn.sample(diagCov=False)
        model.eval()

        for bidx, (input0, uTarget0) in enumerate(test_loader):
            u_out[bidx * mb_size:(bidx + 1) * mb_size, i, 0] = input0
            u_target[bidx * mb_size:(bidx + 1) *
                     mb_size] = uTarget0[:, :testIdx]

            # Expand input to match model in channels
            dims = torch.ones(len(input0.shape))
            dims[1] = args.nic
            input = input0.repeat(toTuple(toNumpy(dims).astype(int))).to(
                args.device)

            # Auto-regress
            for tidx in range(tstep):
                uPred = model(input[:, -2 * args.nic:, :, :])

                u_out[bidx * mb_size:(bidx + 1) * mb_size, i,
                      tidx + 1] = uPred.detach().cpu()

                input = input[:, -2 * int(args.nic - 1):, :].detach()
                input0 = uPred.detach()
                input = torch.cat([input, input0], dim=1)

    # Calc MSE and ESE errors
    u_out = u_out.view(len(test_loader.dataset), n_samples, tstep + 1,
                       -1)[:, :, ::test_every]
    u_target = u_target.view(len(test_loader.dataset), testIdx, -1)
    mean_pred = torch.mean(u_out, dim=1)
    mse_error = torch.mean(torch.pow(mean_pred.double() - u_target.double(),
                                     2),
                           dim=-1)
    ese_error = torch.pow(
        torch.sum(torch.pow(mean_pred, 2) / 2.0, dim=-1) / (args.nel**2) -
        torch.sum(torch.pow(u_target, 2) / 2.0, dim=-1) / (args.nel**2), 2)

    return u_out, u_target, mse_error, ese_error
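The ESE above is the squared difference in mean "energy" between prediction and target: sum(u^2 / 2) averaged over the nel^2 grid points. Spelled out for a single toy field (random stand-ins, not project data):

import torch

nel = 8
u_pred = torch.randn(2, nel, nel)
u_true = torch.randn(2, nel, nel)

energy = lambda u: torch.sum(u ** 2 / 2.0) / nel ** 2
ese = (energy(u_pred) - energy(u_true)) ** 2
print('energy squared error:', ese.item())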
Example 9
def train(args,
          model,
          burgerInt,
          train_loader,
          optimizer,
          tsteps,
          tback,
          tstart,
          dt=0.1):
    '''
    Trains the model
    Args:
        args (argparse): object with program's arguments
        model (PyTorch model): SWAG DenseED model to be trained
        burgerInt (BurgerIntegrate): 1D Burger system time integrator
        train_loader (dataloader): dataloader with training cases (use createTrainingLoader)
        optimizer (PyTorch Optm): optimizer
        tsteps (np.array): [mb] number of timesteps to predict for each mini-batch
        tback (np.array): [mb] number of timesteps to forward predict before back prop
        tstart (np.array): [mb] time-step to start updating model (kept at 0 for now)
        dt (float): current time-step size of the model (used to progressively increase time-step size)
    Returns:
        loss_total (float): negative log joint posterior
        mse_total (float): mean square error between the prediction and time-integrator
    '''
    model.train()

    loss_total = 0
    mse_total = 0
    # Mini-batch loop
    for batch_idx, input in enumerate(train_loader):
        # input [b, 2, x, y]
        # Expand input to match model in channels
        dims = torch.ones(len(input.shape))
        dims[1] = args.nic
        input = input.repeat(toTuple(toNumpy(dims).astype(int))).to(
            args.device)

        loss = 0

        # Loop for number of timesteps
        optimizer.zero_grad()
        for i in range(tsteps[batch_idx]):

            uPred = model(input[:, -2 * args.nic:, :])

            if (i < tstart[batch_idx]):
                # Don't calculate residual, just predict forward
                input = input[:, -2 * int(args.nic - 1):, :].detach()
                input0 = uPred[:, 0, :].unsqueeze(1).detach()
                input = torch.cat([input, input0], dim=1)
            else:
                # Calculate loss
                # Start with implicit time integration
                ustar = burgerInt.crankNicolson(uPred, input[:, -2:, :], dt)
                # Calc. loss based on posterior of the model
                log_joint = model.calc_neg_log_joint(uPred, ustar,
                                                     len(train_loader))
                loss = loss + log_joint

                loss_total = loss_total + loss.data.item()
                mse_total += F.mse_loss(
                    uPred.detach(), ustar.detach()).item()  # MSE for scheduler

                # Back-prop every tback timesteps, then restart the graph
                if ((i + 1) % tback[batch_idx] == 0):
                    loss.backward()
                    loss = 0

                    optimizer.step()
                    optimizer.zero_grad()
                    input = input[:, -2 * int(args.nic - 1):, :].detach()
                    input0 = uPred.detach()
                    input = torch.cat([input, input0], dim=1)
                else:
                    input0 = uPred
                    input = torch.cat([input, input0], dim=1)

        if batch_idx % 10 == 1:
            print("Mini-batch {}/{} ({}%)".format(
                batch_idx, len(train_loader),
                int(100 * batch_idx / len(train_loader))))

    return loss_total / len(train_loader), mse_total / len(train_loader)
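The tback mechanism above is truncated back-propagation through time: the loss accumulates over tback consecutive predictions, gradients flow back through that window, and the input is then detached to cut the graph. A self-contained sketch of the pattern with a toy linear model:

import torch

model = torch.nn.Linear(4, 4)
optim = torch.optim.SGD(model.parameters(), lr=0.01)

x = torch.randn(1, 4)
tsteps, tback = 8, 2
loss = 0
optim.zero_grad()
for i in range(tsteps):
    x_pred = model(x)
    loss = loss + torch.mean(x_pred ** 2)
    if (i + 1) % tback == 0:
        loss.backward()      # back-prop through the last tback steps only
        optim.step()
        optim.zero_grad()
        loss = 0
        x = x_pred.detach()  # cut the graph before continuing
    else:
        x = x_pred           # keep the graph inside the window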