Example #1
    def __init__(self, nef=64, n_layers=3, in_channels=3, oneconv=True, grayedge=True,
                 embeding_size=128, n_mlp=4):
        super(Generator256, self).__init__()

        self.oneconv = oneconv
        self.grayedge = grayedge
        self.rgbchannels = 3
        self.edgechannels = 3
        if self.grayedge:
            self.edgechannels = 1
        if self.oneconv:
            self.edgechannels = self.edgechannels + self.rgbchannels  
        self.embeding_size = embeding_size
        self.embedding = StyleGenerator(embeding_size, n_mlp)
        
        modelList = []
        # 3*256*256 x --> 64*256*256 x1
        self.pad1 = ReflectionPad2d(padding=1)
        self.conv1 = Conv2d(out_channels=nef, kernel_size=3, padding=0, in_channels=in_channels)
        # 64*256*256 x1 --> 128*128*128 x2
        self.rcb1 = RCBBlock(nef, nef*2, 3, 1, embeding_size) 
        # 128*128*128 x2+y --> 128*128*128 x3
        for n in range(n_layers):
            modelList.append(ResnetBlock(nef*2, nef*2, weight=1.0, embeding_size=embeding_size))  
        # 128*128*128 x3 --> 64*256*256 
        self.rdcb1 = RDCBBlock(nef*2, nef, 3, 1, embeding_size, True)
        self.resblocks = nn.Sequential(*modelList)
        # 64*256*256 x4 --> 6*256*256
        self.pad2 = ReflectionPad2d(padding=1)
        self.relu = ReLU()
        self.conv2 = Conv2d(out_channels=self.edgechannels, kernel_size=3, padding=0, in_channels=nef*2)
        self.tanh = Tanh()     
        self.conv3 = Conv2d(out_channels=self.rgbchannels, kernel_size=3, padding=0, in_channels=nef*2)
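
The pad-then-conv pattern above (ReflectionPad2d(1) followed by a 3x3 convolution with padding=0) keeps the spatial size unchanged while avoiding zero-padding artifacts at the borders. A minimal standalone check (shapes are illustrative, not taken from the model):

    import torch
    from torch.nn import Conv2d, ReflectionPad2d

    x = torch.randn(1, 3, 256, 256)
    y = Conv2d(3, 64, kernel_size=3, padding=0)(ReflectionPad2d(1)(x))
    print(y.shape)  # torch.Size([1, 64, 256, 256]) -- size preserved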
Example #2
    def test(self, data_loader):
        print('Test phase using {} split.'.format(self.opt.test_split))
        epoch = 'test'
        data_iter = iter(data_loader)

        self.netG.eval()
        total_iter = 0

        for it, (input, target, target_2) in enumerate(tqdm(data_loader)):
            total_iter += 1
            
            input, target, target_2 = self.get_variable(input), self.get_variable(target), self.get_variable(target_2)

            # self.complete_padding = True
            if self.opt.use_padding:
                from torch.nn import ReflectionPad2d

                self.opt.padding = self.get_padding_image(input)

                input = ReflectionPad2d(self.opt.padding)(input)
                target = ReflectionPad2d(self.opt.padding)(target)
                target_2 = ReflectionPad2d(self.opt.padding)(target_2)

            with torch.no_grad():
                outG_1, outG_2 = self.netG.forward(input)

            if self.opt.save_samples:
                # visuals = OrderedDict([('input', input.data),
                #                       ('gt', target.data),
                #                       ('output', outG.data)])
                # visualizer.display_images(visuals, epoch)
                self.save_images(input, outG_1, outG_2, target, it + 1, 'test', out_type=self.get_type(self.opt.save_bin))
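
get_padding_image is not shown in this snippet; a hedged sketch, assuming it rounds both spatial dimensions up to a network-friendly multiple and returns padding in the (left, right, top, bottom) order that ReflectionPad2d expects:

    def get_padding_image(self, x, multiple=32):
        # Hypothetical helper: pad H and W up to the next multiple of `multiple`.
        h, w = x.shape[-2:]
        pad_h = (multiple - h % multiple) % multiple
        pad_w = (multiple - w % multiple) % multiple
        # ReflectionPad2d takes (left, right, top, bottom)
        return (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2)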
Example #3
    def __init__(self):
        super(SiameseNetwork, self).__init__()
        self.cnn1 = Sequential(
            ReflectionPad2d(1),
            Conv2d(1, 4, kernel_size=3),
            ReLU(inplace=True),
            BatchNorm2d(4),
            ReflectionPad2d(1),
            Conv2d(4, 8, kernel_size=3),
            ReLU(inplace=True),
            BatchNorm2d(8),
            ReflectionPad2d(1),
            Conv2d(8, 8, kernel_size=3),
            ReLU(inplace=True),
            BatchNorm2d(8),
        )

        self.fc1 = Sequential(Linear(8 * 100 * 100, 500), ReLU(inplace=True),
                              Linear(500, 500), ReLU(inplace=True),
                              Linear(500, 5))

    def forward_once(self, x):
        output = self.cnn1(x)
        output = output.view(output.size()[0], -1)
        output = self.fc1(output)
        return output

    def forward(self, input1, input2):
        output1 = self.forward_once(input1)
        output2 = self.forward_once(input2)
        return output1, output2
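
A minimal usage sketch (assuming grayscale 100x100 input pairs, which is what the 8 * 100 * 100 flatten size in fc1 implies):

    import torch

    net = SiameseNetwork()
    a = torch.randn(4, 1, 100, 100)
    b = torch.randn(4, 1, 100, 100)
    out1, out2 = net(a, b)  # two batches of 5-dim embeddings, e.g. for a contrastive loss
    print(out1.shape)       # torch.Size([4, 5])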
Example #4
    def get_eval_error(self, val_loader, model, criterion, epoch):
        """
        Validate every self.opt.val_freq epochs
        """
        # no need to switch to model.eval because we want to keep dropout layers active. Do I have to ignore batch norm layers?
        cumulated_rmse = 0
        batchSize = 1
        input = self.get_variable(torch.FloatTensor(batchSize, 3,
                                                    self.opt.imageSize[0],
                                                    self.opt.imageSize[1]),
                                  requires_grad=False)
        mask = self.get_variable(torch.FloatTensor(batchSize, 1,
                                                   self.opt.imageSize[0],
                                                   self.opt.imageSize[1]),
                                 requires_grad=False)
        target = self.get_variable(
            torch.FloatTensor(batchSize, 1, self.opt.imageSize[0],
                              self.opt.imageSize[1]))
        # model.eval()
        model.train(False)
        pbar_val = tqdm(val_loader)
        for i, (rgb_cpu, depth_cpu) in enumerate(pbar_val):
            pbar_val.set_description('[Validation]')
            input.data.resize_(rgb_cpu.size()).copy_(rgb_cpu)
            target.data.resize_(depth_cpu.size()).copy_(depth_cpu)

            if self.opt.use_padding:
                from torch.nn import ReflectionPad2d

                self.opt.padding = self.get_padding_image(input)

                input = ReflectionPad2d(self.opt.padding)(input)
                target = ReflectionPad2d(self.opt.padding)(target)

            # get output of the network
            with torch.no_grad():
                outG = model.forward(input)
            # apply mask
            nomask_outG = outG.data  # for displaying purposes
            mask_ByteTensor = self.get_mask(target.data)
            mask.data.resize_(mask_ByteTensor.size()).copy_(mask_ByteTensor)
            outG = outG * mask
            target = target * mask
            cumulated_rmse += sqrt(criterion(outG, target, mask,
                                             no_mask=False))

            if (i == 1):
                self.visualizer.display_images(OrderedDict([
                    ('input', input.data), ('gt', target.data),
                    ('output', nomask_outG)
                ]),
                                               epoch='val {}'.format(epoch),
                                               phase='val')

        return cumulated_rmse / len(val_loader)
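
get_mask is defined elsewhere in this trainer; a minimal sketch of what it plausibly computes, assuming invalid depth pixels are stored as zeros (an assumption, not taken from the source):

    def get_mask(self, depth):
        # Hypothetical: mark strictly positive depth values as valid.
        return (depth > 0).float()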
Example #5
    def __init__(self, nef=64, out_channels=3, in_channels=3, useNorm='BN'):
        super(Pix2pix256, self).__init__()

        # 256*256*3-->256*256*32
        self.pad1 = ReflectionPad2d(padding=1)
        self.conv1 = Conv2d(in_channels, nef, 3, 1, 0)
        # 256*256*32-->128*128*64
        self.rcb0 = RCBBlock(nef, nef * 2, useNorm=useNorm)
        # 128*128*64-->64*64*128
        self.rcb1 = RCBBlock(nef * 2, nef * 4, useNorm=useNorm)
        # 64*64*128-->32*32*256
        self.rcb2 = RCBBlock(nef * 4, nef * 8, useNorm=useNorm)
        # 32*32*256-->16*16*512
        self.rcb3 = RCBBlock(nef * 8, nef * 8, useNorm=useNorm)
        # 16*16*512-->8*8*512
        self.rcb4 = RCBBlock(nef * 8, nef * 8, useNorm=useNorm)
        # 8*8*512-->4*4*512
        self.rcb5 = RCBBlock(nef * 8, nef * 8, useNorm=useNorm)
        # 4*4*512-->2*2*512
        self.rcb6 = RCBBlock(nef * 8, nef * 8, useNorm=useNorm)
        # 2*2*512-->1*1*512
        self.relu = LeakyReLU(0.2)
        self.pad2 = ReflectionPad2d(padding=1)
        self.conv7 = Conv2d(nef * 8, nef * 8, 4, 2, 0)
        # 1*1*512-->2*2*512
        self.rdcb7 = RDCBBlock(nef * 8,
                               nef * 8,
                               useNorm=useNorm,
                               up=True,
                               padding='repeat')
        # 2*2*1024-->4*4*512
        self.rdcb6 = RDCBBlock(nef * 16, nef * 8, useNorm=useNorm, up=True)
        # 4*4*1024-->8*8*512
        self.rdcb5 = RDCBBlock(nef * 16, nef * 8, useNorm=useNorm, up=True)
        # 8*8*1024-->16*16*512
        self.rdcb4 = RDCBBlock(nef * 16, nef * 8, useNorm=useNorm, up=True)
        # 16*16*512-->32*32*256
        self.rdcb3 = RDCBBlock(nef * 16, nef * 8, useNorm=useNorm, up=True)
        # 32*32*512-->64*64*128
        self.rdcb2 = RDCBBlock(nef * 16, nef * 4, useNorm=useNorm, up=True)
        # 64*64*256-->128*128*64
        self.rdcb1 = RDCBBlock(nef * 8, nef * 2, useNorm=useNorm, up=True)
        # 128*128*128-->256*256*32
        self.rdcb0 = RDCBBlock(nef * 4, nef, useNorm=useNorm, up=True)
        # 256*256*32-->256*256*3
        self.pad3 = ReflectionPad2d(padding=1)
        self.dconv1 = Conv2d(nef * 2, out_channels, 3, 1, 0)
        self.tanh = Tanh()
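
The forward pass is omitted, but the decoder in-channel counts (nef * 16 into rdcb6 through rdcb3) imply U-Net style skip connections: each decoder block sees the previous decoder output concatenated with the mirrored encoder feature. A one-step sketch under that assumption (d7 and e6 are illustrative names, not from the source):

    # d6 = self.rdcb6(torch.cat([d7, e6], dim=1))  # (nef*8 + nef*8) -> nef*8 channels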
Example #6
    def __init__(self, in_channels, out_channels=64, kernel_size=3, padding=1, embeding_size=128):
        super(RCBBlock, self).__init__()
        
        self.relu = LeakyReLU(0.2)
        self.pad = ReflectionPad2d(padding=padding)
        self.conv = Conv2d(out_channels=out_channels, kernel_size=kernel_size, stride=2,
                              padding=0, in_channels=in_channels)

        self.bn = AdaptiveInstanceNorm(embeding_size, out_channels)
Example #7
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 upsample=None):
        super(UpsampleConLayer, self).__init__()
        self.upsample = upsample
        reflection_padding = kernel_size // 2
        self.reflection_pad = ReflectionPad2d(reflection_padding)
        self.conv2d = Conv2d(in_channels, out_channels, kernel_size, stride)
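
The matching forward is not part of this snippet; a sketch based on the common fast-neural-style pattern this layer resembles (an assumption, not the repo's code):

    def forward(self, x):
        # Optionally upsample before the reflect-pad + conv pair.
        if self.upsample:
            x = torch.nn.functional.interpolate(x, scale_factor=self.upsample)
        return self.conv2d(self.reflection_pad(x))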
Example #8
    def build(self):
        pad_size_tmp = list(self.pad_size)  # copy so repeated builds don't mutate self.pad_size

        # This allows handling the case where the padding is equal to the image size
        if pad_size_tmp[0] == self.input_size[0]:
            pad_size_tmp[0] -= 1
            pad_size_tmp[1] -= 1
        if pad_size_tmp[2] == self.input_size[1]:
            pad_size_tmp[2] -= 1
            pad_size_tmp[3] -= 1
        self.padding_module = ReflectionPad2d(pad_size_tmp)
Example #9
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 upsample=False):
        super().__init__()

        self.upsample = Upsample(scale_factor=2) if upsample else None
        self.padding = ReflectionPad2d(kernel_size //
                                       2) if kernel_size // 2 else None
        self.conv = Conv2d(in_channels, out_channels, kernel_size, stride)
Example #10
def get_default_transforms(device):
    return Compose([
        ImagenetNorm(),
        AddBatchDim(),
        ReflectionPad2d(16),
        TranslateTensor(max_shift=16, device=device),
        ScaleTensor(scale_factors=[1, 0.975, 1.025, 0.95, 1.05],
                    device=device),
        RotateTensor(angles=list(range(-5, 6)), device=device),
        TranslateTensor(max_shift=8, device=device),
        CropTensorPadding(16)
    ])
Example #11
    def __init__(self,
                 fin,
                 fout,
                 fhidden=None,
                 weight=0.1,
                 is_bias=True,
                 embeding_size=128):
        super(ResnetBlock, self).__init__()
        # Attributes
        self.is_bias = is_bias
        self.weight = weight
        self.learned_shortcut = (fin != fout)
        self.fin = fin
        self.fout = fout
        self.actvn = LeakyReLU(0.2)
        self.embeding_size = embeding_size
        if fhidden is None:
            self.fhidden = min(fin, fout)
        else:
            self.fhidden = fhidden

        self.bn1 = AdaptiveInstanceNorm(self.embeding_size, self.fhidden)
        self.bn2 = AdaptiveInstanceNorm(self.embeding_size, self.fout)

        self.pad_0 = ReflectionPad2d(padding=1)
        self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=0)
        self.pad_1 = ReflectionPad2d(padding=1)
        self.conv_1 = nn.Conv2d(self.fhidden,
                                self.fout,
                                3,
                                stride=1,
                                padding=0,
                                bias=is_bias)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(self.fin,
                                    self.fout,
                                    1,
                                    stride=1,
                                    padding=0,
                                    bias=False)
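
forward is not shown; a plausible wiring for this AdaIN-conditioned residual block given the modules above (the AdaptiveInstanceNorm call signature and the ordering are assumptions):

    # def forward(self, x, z):
    #     dx = self.bn1(self.conv_0(self.pad_0(self.actvn(x))), z)   # fin -> fhidden
    #     dx = self.bn2(self.conv_1(self.pad_1(self.actvn(dx))), z)  # fhidden -> fout
    #     x_s = self.conv_s(x) if self.learned_shortcut else x
    #     return x_s + self.weight * dx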
Example #12
    def build(self):
        pad_size_tmp = list(self.pad_size)

        # This allows handling the case where the padding is equal to the image size
        if pad_size_tmp[0] == self.input_size[0]:
            pad_size_tmp[0] -= 1
            pad_size_tmp[1] -= 1
        if pad_size_tmp[2] == self.input_size[1]:
            pad_size_tmp[2] -= 1
            pad_size_tmp[3] -= 1
        # Pytorch expects its padding as [left, right, top, bottom]
        self.padding_module = ReflectionPad2d([pad_size_tmp[2], pad_size_tmp[3],
                                               pad_size_tmp[0], pad_size_tmp[1]])
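
A quick runnable check of that (left, right, top, bottom) ordering (shapes chosen arbitrarily):

    import torch
    from torch.nn import ReflectionPad2d

    x = torch.zeros(1, 1, 4, 6)
    padded = ReflectionPad2d([1, 2, 3, 0])(x)  # left=1, right=2, top=3, bottom=0
    print(padded.shape)  # torch.Size([1, 1, 7, 9])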
Example #13
    def __init__(self, in_channels, out_channels=64, kernel_size=3, padding=1, useNorm='BN'):
        super(RCBBlock, self).__init__()

        self.relu = LeakyReLU(0.2)
        self.pad = ReflectionPad2d(padding=padding)
        self.conv = Conv2d(out_channels=out_channels, kernel_size=kernel_size, stride=2,
                           padding=0, in_channels=in_channels)
        if useNorm == 'IN':
            self.bn = InstanceNorm2d(num_features=out_channels, affine=True)
        elif useNorm == 'BN':
            self.bn = BatchNorm2d(num_features=out_channels)
        else:
            self.bn = Identity()
Example #14
    def __init__(self, pad_size, pre_pad=False):
        """
            Padding which allows to simultaneously pad in a reflection fashion
            and map to complex.

            Parameters
            ----------
            pad_size : int
                size of padding to apply.
            pre_pad : boolean
                if set to true, then there is no padding, one simply adds the imaginarty part.
        """
        self.pre_pad = pre_pad
        self.padding_module = ReflectionPad2d(pad_size)
Example #15
    def __init__(self, in_channels, out_channels, ndf=64, n_layers=3, input_size=64, useFC=False):
        super(DiscriminatorSN, self).__init__()
        
        modelList = []       
        kernel_size = 4
        padding = int(np.ceil((kernel_size - 1)/2))
        modelList.append(ReflectionPad2d(padding=padding))
        modelList.append(spectral_norm(Conv2d(out_channels=ndf, kernel_size=kernel_size, stride=2,
                              padding=0, in_channels=in_channels)))
        modelList.append(LeakyReLU(0.2))
        self.useFC = useFC
        
        # 32*32 --> 16*16 --> 8*8
        size = input_size//2
        nf_mult = 1
        for n in range(1, n_layers):
            size = size // 2
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            modelList.append(ReflectionPad2d(padding=padding))
            modelList.append(spectral_norm(Conv2d(out_channels=ndf * nf_mult, kernel_size=kernel_size, stride=2,
                                  padding=0, in_channels=ndf * nf_mult_prev)))
            modelList.append(LeakyReLU(0.2))
        # 7*7
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        modelList.append(ReflectionPad2d(padding=padding))
        modelList.append(spectral_norm(Conv2d(out_channels=ndf * nf_mult, kernel_size=kernel_size, stride=1,
                              padding=0, in_channels=ndf * nf_mult_prev)))
        # 6*6
        modelList.append(LeakyReLU(0.2))
        modelList.append(ReflectionPad2d(padding=padding))
        modelList.append(spectral_norm(Conv2d(out_channels=out_channels, kernel_size=kernel_size, stride=1,
                              padding=0, in_channels=ndf * nf_mult)))

        self.model = nn.Sequential(*modelList)
        self.fc = spectral_norm(nn.Linear((size-2)*(size-2)*out_channels, 1))
Example #16
    def __init__(
        self,
        in_channels: InChannels,
        out_channels: OutChannels,
        kernel_size: KernelSize,
        stride: Stride,
    ) -> None:
        super(ConvLayer, self).__init__()
        reflection_padding = kernel_size // 2
        self.reflection_pad: ReflectionPad2d[
            Divide[KernelSize,
                   L[2]]] = ReflectionPad2d(reflection_padding)  # type: ignore
        self.conv2d: Conv2d[InChannels, OutChannels, KernelSize,
                            Stride] = Conv2d(in_channels, out_channels,
                                             kernel_size, stride)
Example #17
    def __init__(self, width, height, num_encoders):

        self.height = height
        self.width = width
        self.num_encoders = num_encoders
        self.width_crop_size = optimal_crop_size(self.width, num_encoders)
        self.height_crop_size = optimal_crop_size(self.height, num_encoders)

        self.padding_top = ceil(0.5 * (self.height_crop_size - self.height))
        self.padding_bottom = floor(0.5 * (self.height_crop_size - self.height))
        self.padding_left = ceil(0.5 * (self.width_crop_size - self.width))
        self.padding_right = floor(0.5 * (self.width_crop_size - self.width))
        self.pad = ReflectionPad2d((self.padding_left, self.padding_right, self.padding_top, self.padding_bottom))

        self.cx = floor(self.width_crop_size / 2)
        self.cy = floor(self.height_crop_size / 2)

        self.ix0 = self.cx - floor(self.width / 2)
        self.ix1 = self.cx + ceil(self.width / 2)
        self.iy0 = self.cy - floor(self.height / 2)
        self.iy1 = self.cy + ceil(self.height / 2)
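
optimal_crop_size is not shown; presumably it rounds each dimension up so that num_encoders halvings stay integral, and the stored ix/iy indices crop the original region back out of the padded tensor. A sketch under that assumption:

    # def optimal_crop_size(dim, num_encoders):  # hypothetical implementation
    #     m = 2 ** num_encoders
    #     return ((dim + m - 1) // m) * m
    #
    # padded = self.pad(x)  # (H, W) -> (height_crop_size, width_crop_size)
    # restored = padded[..., self.iy0:self.iy1, self.ix0:self.ix1]  # back to (H, W)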
Example #18
    def build(self):
        """Builds the padding module.

            Attributes
            ----------
            padding_module : ReflectionPad2d
                Pads the input tensor using the reflection of the input
                boundary.

        """
        pad_size_tmp = list(self.pad_size)

        # This handles the case where the padding is equal to the image size
        if pad_size_tmp[0] == self.input_size[0]:
            pad_size_tmp[0] -= 1
            pad_size_tmp[1] -= 1
        if pad_size_tmp[2] == self.input_size[1]:
            pad_size_tmp[2] -= 1
            pad_size_tmp[3] -= 1
        # Pytorch expects its padding as [left, right, top, bottom]
        self.padding_module = ReflectionPad2d([pad_size_tmp[2], pad_size_tmp[3],
                                               pad_size_tmp[0], pad_size_tmp[1]])
Example #19
    def test_raster_notarget(self, load_data):
        from dataloader.dataset_bank import dataset_vaihingen
        print('Test phase using {} split.'.format(self.opt.test_split))
        phase = 'test'

        imageSize = self.opt.imageSize if len(self.opt.imageSize) == 2 else self.opt.imageSize * 2
        test_stride = self.opt.test_stride if len(self.opt.test_stride) == 2 else self.opt.test_stride * 2
        input_list = dataset_vaihingen(self.opt.dataroot, data_split=self.opt.test_split, phase='test', model=self.opt.model)

        data_loader = [rgb for rgb in load_data(input_list, phase)] 
        
        self.netG.eval()

        prob_matrix = self.gaussian_kernel(imageSize[0], imageSize[1])
        
        for it, input in enumerate(tqdm(data_loader)):
            rgb_cache = []

            pred_gaussian = np.zeros([input.shape[-2], input.shape[-1]])
            if self.opt.reconstruction_method == 'gaussian':
                pred_sem = np.zeros([self.opt.n_classes, input.shape[-2], input.shape[-1]])
            else:
                pred_sem = np.zeros([input.shape[-2], input.shape[-1]])

            # input is a tensor
            rgb_cache = [crop for crop in self.sliding_window_coords(input, test_stride, imageSize)]

            for input_crop_tuple in tqdm(rgb_cache, total=len(rgb_cache)):
                input_crop, (x1, x2, y1, y2) = input_crop_tuple
                input_crop = self.get_variable(input_crop)
                
                # ToDo: Deal with padding later
                if self.opt.use_padding:
                    from torch.nn import ReflectionPad2d

                    self.opt.padding = self.get_padding_image_dims(input_crop)

                    input_crop = ReflectionPad2d(self.opt.padding)(input_crop)
                    (pwl, pwr, phu, phb) = self.opt.padding
                    # target_crop = ReflectionPad2d(self.opt.padding)(target_crop)

                with torch.no_grad():
                    outG_sem = self.netG.forward(input_crop)
                
                if self.opt.reconstruction_method == 'gaussian':
                    outG_sem_prob = nn.Sigmoid()(outG_sem)
                    seg_map = outG_sem_prob.cpu().data[0].numpy()
                    pred_sem[:, y1:y2,x1:x2] += np.multiply(seg_map, prob_matrix)
                    pred_gaussian[y1:y2,x1:x2] += prob_matrix
                else:
                    pred_sem[y1:y2,x1:x2] = np.argmax(outG_sem.cpu().data[0].numpy(), axis=0)

                # visualize 
                visuals = OrderedDict([('input', input_crop.data),
                            ('out_sem', np.argmax(outG_sem.cpu().data[0].numpy(), axis=0))
                            ])
                self.display_test_results(visuals)

            if self.opt.save_samples:
                if self.opt.reconstruction_method == 'gaussian':
                    pred_sem = np.divide(pred_sem, pred_gaussian)
                self.save_images(input, [pred_sem], target=None, meta=None, index=it + 1, phase='test')
Example #20
    def build(self):
        self.padding_module = ReflectionPad2d(self.pad_size)
Example #21
  def f(data, opt):
    node.reset()
    node.trace(0, p='slomo start')
    _, oriHeight, oriWidth = data[0][0].size()
    width = upTruncBy32(oriWidth)
    height = upTruncBy32(oriHeight)
    pad = ReflectionPad2d((0, width - oriWidth, 0, height - oriHeight))
    unpad = lambda im: im[:, :oriHeight, :oriWidth]
    flowBackWarp = getFlowBack(opt, width, height)

    if not opt.batchSize:
      opt.batchSize = getBatchSize({'load': width * height})
      log.info('Slomo batch size={}'.format(opt.batchSize))
    batchSize = len(data)
    sf = opt.sf
    tempOut = [0 for _ in range(batchSize * sf + 1)]
    # Save reference frames
    if opt.notLast or opt.firstTime:
      tempOut[0] = func(data[0][0])
      outStart = 0
    else:
      outStart = 1
    for i, frames in enumerate(data):
      tempOut[(i + 1) * sf] = frames[1]

    # Load data
    I0 = pad(torch.stack([frames[0] for frames in data]))
    I1 = pad(torch.stack([frames[1] for frames in data]))
    flowOut = opt.flowComp(torch.cat((I0, I1), dim=1))
    F_0_1 = flowOut[:,:2,:,:]
    F_1_0 = flowOut[:,2:,:,:]
    node.trace()

    # Generate intermediate frames
    for intermediateIndex in range(1, sf):
      t = intermediateIndex / sf
      temp = -t * (1 - t)
      fCoeff = (temp, t * t, (1 - t) * (1 - t), temp)
      wCoeff = (1 - t, t)

      F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
      F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
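      # The two lines above implement the intermediate-flow approximation from
      # the Super SloMo paper (noted here as an annotation, not source code):
      #   F_t->0 = -t(1-t) * F_0->1 + t^2    * F_1->0
      #   F_t->1 = (1-t)^2 * F_0->1 - t(1-t) * F_1->0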

      g_I0_F_t_0 = flowBackWarp(I0, F_t_0)
      g_I1_F_t_1 = flowBackWarp(I1, F_t_1)

      intrpOut = opt.ArbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))

      F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
      F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
      V_t_0   = torch.sigmoid(intrpOut[:, 4:5, :, :])
      V_t_1   = 1 - V_t_0

      g_I0_F_t_0_f = flowBackWarp(I0, F_t_0_f)
      g_I1_F_t_1_f = flowBackWarp(I1, F_t_1_f)

      Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)

      # Save intermediate frame
      for i in range(batchSize):
        tempOut[intermediateIndex + i * sf] = unpad(Ft_p[i].detach())

      node.trace()
      tempOut[intermediateIndex] = func(tempOut[intermediateIndex])

    for i in range(sf, len(tempOut)):
      tempOut[i] = func(tempOut[i])
    res = []
    for item in tempOut[outStart:]:
      if type(item) == list:
        res.extend(item)
      elif type(item) != type(None):
        res.append(item)
    opt.firstTime = 0
    return res
Example #22
    def test_raster_target(self, load_data):
        from dataloader.dataset_bank import dataset_vaihingen
        print('Test phase using {} split.'.format(self.opt.test_split))
        phase = 'test'

        use_semantics = ('semantics' in self.opt.tasks)
        self.opt.use_semantics = False
        # self.augmentation = augmentation
        imageSize = self.opt.imageSize if len(self.opt.imageSize) == 2 else self.opt.imageSize * 2
        test_stride = self.opt.test_stride if len(self.opt.test_stride) == 2 else self.opt.test_stride * 2
        input_list, target_path = dataset_vaihingen(self.opt.dataroot, data_split=self.opt.test_split, phase='test', model=self.opt.model)


        data_loader = [(rgb, depth, meta) for rgb, depth, meta in load_data(input_list, target_path, phase='test')] # false because we do not have the GT        
        # no error in save semantics, same value to both variables
        self.netG.eval()

        # create a matrix with a gaussian distribution to be the weights during reconstruction
        prob_matrix = self.gaussian_kernel(imageSize[0], imageSize[1])
        targetSize = [320, 320]
        prob_matrix_tg = self.gaussian_kernel(targetSize[0], targetSize[1])

        for it, (input, target, meta) in enumerate(tqdm(data_loader)):
            rgb_cache = []
            depth_cache = []

            # concatenate probability matrix
            pred = np.zeros([input.shape[-2], input.shape[-1]])
            if self.opt.reconstruction_method == 'gaussian':
                pred = np.zeros([2, input.shape[-2], input.shape[-1]])
                pred_sem = np.zeros([self.opt.n_classes, input.shape[-2], input.shape[-1]])
            else:
                pred_sem = np.zeros([input.shape[-2], input.shape[-1]])
            target_reconstructed = np.zeros([2, input.shape[-2], input.shape[-1]])

            # input is a tensor
            rgb_cache = [crop for crop in self.sliding_window_coords(input, test_stride, imageSize)]

            target_cache = [crop for crop in self.sliding_window_coords(target[0], [50, 50], targetSize)]
            # reconstruct target
            for target_crop_tuple in tqdm(target_cache, total=len(target_cache)):
                (x1, x2, y1, y2) = target_crop_tuple[1]
                target_reconstructed[0, y1:y2,x1:x2] += prob_matrix_tg * (target_crop_tuple[0] - target_crop_tuple[0].min())
                target_reconstructed[1,y1:y2,x1:x2] += prob_matrix_tg
            gaussian = target_reconstructed[1]
            target_reconstructed = np.divide(target_reconstructed[0], gaussian)


            for input_crop_tuple in tqdm(rgb_cache, total=len(rgb_cache)):
                input_crop, (x1, x2, y1, y2) = input_crop_tuple
                input_crop = self.get_variable(input_crop)
                # self.complete_padding = True
                # ToDo: Deal with padding later
                if self.opt.use_padding:
                    from torch.nn import ReflectionPad2d

                    self.opt.padding = self.get_padding_image_dims(input_crop)

                    input_crop = ReflectionPad2d(self.opt.padding)(input_crop)
                    (pwl, pwr, phu, phb) = self.opt.padding
                    # target_crop = ReflectionPad2d(self.opt.padding)(target_crop)

                with torch.no_grad():
                    if use_semantics:
                        outG, outG_sem = self.netG.forward(input_crop)
                    else:
                        outG = self.netG.forward(input_crop)
                out_numpy = outG.data[0].cpu().float().numpy()
                if self.opt.reconstruction_method == 'concatenation':
                    if self.opt.use_padding:
                        pred[y1:y2,x1:x2] = (out_numpy[0])[phu:phu+self.opt.imageSize[1], pwl:pwl+self.opt.imageSize[0]]
                    else:
                        pred[y1:y2,x1:x2] = out_numpy[0]
                elif self.opt.reconstruction_method == 'gaussian':
                    pred[0,y1:y2,x1:x2] += np.multiply(out_numpy[0], prob_matrix)
                    pred[1,y1:y2,x1:x2] += prob_matrix
                
                if self.opt.save_semantics:
                    # pred_sem[:,y1:y2,x1:x2] += outG_sem.cpu().data[0].numpy()
                    if self.opt.reconstruction_method == 'gaussian':
                        # seg_map = np.argmax(outG_sem.cpu().data[0].numpy(), axis=0)
                        # pred_sem[y1:y2,x1:x2] += np.multiply(seg_map, prob_matrix)
                        outG_sem_prob = nn.Sigmoid()(outG_sem)
                        seg_map = outG_sem_prob.cpu().data[0].numpy()
                        pred_sem[:, y1:y2,x1:x2] += np.multiply(seg_map, prob_matrix)
                    else:
                        pred_sem[y1:y2,x1:x2] = np.argmax(outG_sem.cpu().data[0].numpy(), axis=0)
        
            if self.opt.save_samples:
                outputs = []
                if self.opt.reconstruction_method == 'gaussian':
                    gaussian = pred[1]
                    pred_height = np.divide(pred[0], gaussian)
                    outputs.append(pred_height)
                    del pred_height
                    if self.opt.save_semantics:
                        pred_sem = np.divide(pred_sem, gaussian)
                        outputs.append(pred_sem)
                        del pred_sem
                self.save_images(input, outputs, target_reconstructed, meta, it + 1, 'test')
Example #23
    def test_bayesian(self, it, n_iters):
        error_list = []
        outG_list = []

        use_semantics = self.opt.use_semantics
        self.opt.use_semantics = False
        # self.augmentation = augmentation
        imageSize = self.opt.imageSize if len(self.opt.imageSize) == 2 else self.opt.imageSize * 2
        test_stride = self.opt.test_stride if len(self.opt.test_stride) == 2 else self.opt.test_stride * 2

        # create a matrix with a gaussian distribution to be the weights during reconstruction
        prob_matrix = self.gaussian_kernel(imageSize[0], imageSize[1])

        # for it, (input, target, meta_data, depth_patch_shape) in enumerate(tqdm(self.data_loader)):
        input, target, meta_data, depth_patch_shape = self.data_loader[it]
        for _ in tqdm(range(n_iters)):
            rgb_cache = []
            depth_cache = []
            self.meta_data = meta_data
            self.shape = depth_patch_shape
            # pred = np.zeros(input.shape[-2:])
            # concatenate probability matrix
            pred = np.zeros([input.shape[-2], input.shape[-1]])
            if self.opt.reconstruction_method == 'gaussian':
                pred = np.zeros([2, input.shape[-2], input.shape[-1]])
                pred_sem = np.zeros([self.opt.n_classes, input.shape[-2], input.shape[-1]])
            else:
                pred_sem = np.zeros([input.shape[-2], input.shape[-1]])
            target_reconstructed = np.zeros(input.shape[-2:])

            # input is a tensor
            rgb_cache = [crop for crop in self.sliding_window_coords(input, test_stride, imageSize)]
            depth_cache = [crop for crop in self.sliding_window_coords(target, test_stride, imageSize)] # don't need both

            for input_crop_tuple, target_crop_tuple in tqdm(zip(rgb_cache, depth_cache), total=len(rgb_cache)):
                input_crop, (x1, x2, y1, y2) = input_crop_tuple
                input_crop = self.get_variable(input_crop)
                # self.complete_padding = True
                # ToDo: Deal with padding later
                if self.opt.use_padding:
                    from torch.nn import ReflectionPad2d

                    self.opt.padding = self.get_padding_image_dims(input_crop)

                    input_crop = ReflectionPad2d(self.opt.padding)(input_crop)
                    (pwl, pwr, phu, phb) = self.opt.padding
                    # target_crop = ReflectionPad2d(self.opt.padding)(target_crop)

                with torch.no_grad():
                    outG, _ = self.netG.forward(input_crop)

                out_numpy = outG.data[0].cpu().float().numpy()
                if self.opt.reconstruction_method == 'concatenation':
                    if self.opt.use_padding:
                        pred[y1:y2,x1:x2] = (out_numpy[0])[phu:phu+self.opt.imageSize[1], pwl:pwl+self.opt.imageSize[0]]
                    else:
                        pred[y1:y2,x1:x2] = out_numpy[0]
                elif self.opt.reconstruction_method == 'gaussian':
                    pred[0,y1:y2,x1:x2] += np.multiply(out_numpy[0], prob_matrix)
                    pred[1,y1:y2,x1:x2] += prob_matrix
                
                target_reconstructed[y1:y2,x1:x2] = target_crop_tuple[0]

            if self.opt.reconstruction_method == 'gaussian':
                gaussian = pred[1]
                pred = np.divide(pred[0], gaussian)
                # pred_sem = np.divide(pred_sem, gaussian)
                
                error_list.append(np.abs(pred - target_reconstructed))
                outG_list.append(np.abs(pred))
            
        return error_list, outG_list, target_reconstructed
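
test_bayesian runs n_iters stochastic forward passes over the same tile and collects per-pass predictions and absolute errors; a typical way to consume its outputs, sketched here rather than taken from the source:

    # errors, preds, gt = self.test_bayesian(it=0, n_iters=20)  # hypothetical call site
    # uncertainty = np.std(np.stack(preds), axis=0)     # per-pixel spread across passes
    # mean_abs_error = np.mean(np.stack(errors), axis=0)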