    def _make_images_board(self, model):
        model.eval()
        num_imgs = 64
        fuseTrans = self.cfg.fuseTrans

        batch = next(iter(self.data_loaders[1]))
        input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(batch, self.cfg.device)

        with torch.set_grad_enabled(False):
            XYZ, maskLogit = model(input_images)
            # ------ build transformer ------
            XYZid, ML = transform.fuse3D(
                self.cfg, XYZ, maskLogit, fuseTrans) # [B,3,VHW],[B,1,VHW]
            newDepth, newMaskLogit, collision = transform.render2D(
                self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]

        return {'RGB': utils.make_grid(input_images[:num_imgs]),
                'depth': utils.make_grid(
                    ((1 - newDepth) * (collision == 1).float())[:num_imgs, 0, 0:1, :, :]),
                'depthGT': utils.make_grid(
                    1 - depthGT[:num_imgs, 0, 0:1, :, :]),
                'mask': utils.make_grid(
                    torch.sigmoid(maskLogit[:num_imgs, 0:1, :, :])),
                'mask_rendered': utils.make_grid(
                    torch.sigmoid(newMaskLogit[:num_imgs, 0, 0:1, :, :])),
                'maskGT': utils.make_grid(
                    maskGT[:num_imgs, 0, 0:1, :, :]),
                }
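# --- Usage sketch (not from the source) --------------------------------------
# How the dict of image grids returned by _make_images_board might be written
# to TensorBoard.  It assumes utils.make_grid returns a [C,H,W] tensor (like
# torchvision.utils.make_grid) and that a SummaryWriter is available; both are
# assumptions about the surrounding trainer, not facts from this code.
from torch.utils.tensorboard import SummaryWriter

def log_image_grids(writer: SummaryWriter, images: dict, step: int) -> None:
    """Write every [C,H,W] grid in `images` under a 'val/<name>' tag."""
    for tag, grid in images.items():
        writer.add_image(f"val/{tag}", grid, global_step=step)
# e.g. log_image_grids(writer, self._make_images_board(model), step=epoch)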
Example #2
    def _make_images_board(self, model):
        model.eval()
        num_imgs = 64
        fuseTrans = self.cfg.fuseTrans

        batch = next(iter(self.data_loaders[1]))
        input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(
            batch, self.cfg.device)

        with torch.set_grad_enabled(False):
            XYZ, maskLogit = model(input_images)
            # ------ build transformer ------
            XYZid, ML = transform.fuse3D(self.cfg, XYZ, maskLogit,
                                         fuseTrans)  # [B,3,VHW],[B,1,VHW]
            newDepth, newMaskLogit, collision = transform.render2D(
                self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]

        return {
            'RGB': utils.make_grid(input_images[:num_imgs]),
            'depth': utils.make_grid(
                ((1 - newDepth) * (collision == 1).float())[:num_imgs, 0, 0:1, :, :]),
            'depthGT': utils.make_grid(1 - depthGT[:num_imgs, 0, 0:1, :, :]),
            'mask': utils.make_grid(torch.sigmoid(maskLogit[:num_imgs, 0:1, :, :])),
            'mask_rendered': utils.make_grid(
                torch.sigmoid(newMaskLogit[:num_imgs, 0, 0:1, :, :])),
            'maskGT': utils.make_grid(maskGT[:num_imgs, 0, 0:1, :, :]),
        }
    def _val_on_epoch(self, model):
        model.eval()

        data_loader = self.data_loaders[1]
        running_loss_depth = 0.0
        running_loss_mask = 0.0
        running_loss = 0.0
        fuseTrans = self.cfg.fuseTrans

        for batch in data_loader:
            input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(batch, self.cfg.device)

            with torch.set_grad_enabled(False):
                XYZ, maskLogit = model(input_images)
                # Threshold the last 8 mask-logit channels at 0 and use the
                # resulting binary mask (as float) as the fusion confidence
                # instead of the raw logits.
                mask = (maskLogit[:, 8:16, :, :] > 0).float()
                # ------ build transformer ------
                XYZid, ML = transform.fuse3D(
                    self.cfg, XYZ, mask, fuseTrans)  # [B,3,VHW],[B,1,VHW]
                newDepth, newMaskLogit, collision = transform.render2D(
                    self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]
                # ------ Compute loss ------
                loss_depth = self.l1(
                    newDepth.masked_select(collision==1),
                    depthGT.masked_select(collision==1))
                loss_mask = self.sigmoid_bce(newMaskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_depth

            running_loss_depth += loss_depth.item() * input_images.size(0)
            running_loss_mask += loss_mask.item() * input_images.size(0)
            running_loss += loss.item() * input_images.size(0)

        epoch_loss_depth = running_loss_depth / len(data_loader.dataset)
        epoch_loss_mask = running_loss_mask / len(data_loader.dataset)
        epoch_loss = running_loss / len(data_loader.dataset)

        print(f"\tVal loss: {epoch_loss}")

        return {"epoch_loss_depth": epoch_loss_depth,
                "epoch_loss_mask": epoch_loss_mask,
                "epoch_loss": epoch_loss, }
Example #4
    def _train_on_epoch(self, model, optimizer):
        model.train()

        data_loader = self.data_loaders[0]
        running_loss_depth = 0.0
        running_loss_mask = 0.0
        running_loss = 0.0
        fuseTrans = self.cfg.fuseTrans

        for self.iteration, batch in enumerate(data_loader, self.iteration):
            input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(
                batch, self.cfg.device)

            with torch.set_grad_enabled(True):
                optimizer.zero_grad()

                XYZ, maskLogit = model(input_images)
                # ------ build transformer ------
                XYZid, ML = transform.fuse3D(self.cfg, XYZ, maskLogit,
                                             fuseTrans)  # [B,3,VHW],[B,1,VHW]
                newDepth, newMaskLogit, collision = transform.render2D(
                    self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]
                # ------ Compute loss ------
                loss_depth = self.l1(newDepth.masked_select(collision == 1),
                                     depthGT.masked_select(collision == 1))
                loss_mask = self.sigmoid_bce(newMaskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_depth

                # Update weights
                loss.backward()
                # True Weight decay
                if self.cfg.trueWD is not None:
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.data = param.data.add(
                                param.data, alpha=-self.cfg.trueWD * group['lr'])
                optimizer.step()

            if self.on_after_batch is not None:
                if self.cfg.lrSched.lower() == "cyclical":
                    self.on_after_batch(self.iteration)
                else:
                    self.on_after_batch(self.epoch)

            running_loss_depth += loss_depth.item() * input_images.size(0)
            running_loss_mask += loss_mask.item() * input_images.size(0)
            running_loss += loss.item() * input_images.size(0)

        epoch_loss_depth = running_loss_depth / len(data_loader.dataset)
        epoch_loss_mask = running_loss_mask / len(data_loader.dataset)
        epoch_loss = running_loss / len(data_loader.dataset)

        print(f"\tTrain loss: {epoch_loss}")

        return {
            "epoch_loss_depth": epoch_loss_depth,
            "epoch_loss_mask": epoch_loss_mask,
            "epoch_loss": epoch_loss,
        }
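# --- "True" weight decay sketch (not from the source) ------------------------
# The loop above shrinks every parameter by lr * trueWD before the optimizer
# step, i.e. p <- p - lr * wd * p = (1 - lr * wd) * p, instead of folding an
# L2 term into the loss.  A compact equivalent of that update:
import torch

def apply_true_weight_decay(optimizer: torch.optim.Optimizer, wd: float) -> None:
    with torch.no_grad():
        for group in optimizer.param_groups:
            for p in group['params']:
                p.mul_(1.0 - wd * group['lr'])  # same as p.add(p, alpha=-wd*lr)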
Example #5
    def findLR(self,
               model,
               optimizer,
               writer,
               start_lr=1e-7,
               end_lr=10,
               num_iters=50):

        model.train()

        lrs = np.logspace(np.log10(start_lr), np.log10(end_lr), num_iters)
        losses = []
        fuseTrans = self.cfg.fuseTrans

        for lr in lrs:
            # Update LR
            for group in optimizer.param_groups:
                group['lr'] = lr

            batch = next(iter(self.data_loaders[0]))
            input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(
                batch, self.cfg.device)

            with torch.set_grad_enabled(True):
                optimizer.zero_grad()

                XYZ, maskLogit = model(input_images)
                # ------ build transformer ------
                XYZid, ML = transform.fuse3D(self.cfg, XYZ, maskLogit,
                                             fuseTrans)  # [B,3,VHW],[B,1,VHW]
                newDepth, newMaskLogit, collision = transform.render2D(
                    self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]
                # ------ Compute loss ------
                loss_depth = self.l1(newDepth.masked_select(collision == 1),
                                     depthGT.masked_select(collision == 1))
                loss_mask = self.sigmoid_bce(newMaskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_depth

                # Update weights
                loss.backward()
                # True Weight decay
                if self.cfg.trueWD is not None:
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.data = param.data.add(
                                param.data, alpha=-self.cfg.trueWD * group['lr'])
                optimizer.step()

            losses.append(loss.item())

        fig, ax = plt.subplots()
        ax.plot(lrs, losses)
        ax.set_xlabel('learning rate')
        ax.set_ylabel('loss')
        ax.set_xscale('log')
        writer.add_figure('findLR', fig)
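# --- Picking an LR from the range test (not from the source) -----------------
# A hedged helper for reading the (lrs, losses) curve that findLR plots above:
# return the learning rate where the smoothed loss drops fastest.  The
# smoothing window and the steepest-descent heuristic are choices of this
# sketch, not part of the original trainer.
import numpy as np

def suggest_lr(lrs, losses, window=5):
    smooth = np.convolve(losses, np.ones(window) / window, mode='same')
    slopes = np.gradient(smooth, np.log10(lrs))  # d(loss)/d(log10 lr)
    return float(lrs[int(np.argmin(slopes))])    # most negative slope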
    def _train_on_epoch(self, model, optimizer):
        model.train()

        data_loader = self.data_loaders[0]
        running_loss_depth = 0.0
        running_loss_mask = 0.0
        running_loss = 0.0
        fuseTrans = self.cfg.fuseTrans

        for self.iteration, batch in enumerate(data_loader, self.iteration):
            input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(batch, self.cfg.device)

            with torch.set_grad_enabled(True):
                optimizer.zero_grad()

                XYZ, maskLogit = model(input_images)
                # ------ build transformer ------
                XYZid, ML = transform.fuse3D(
                    self.cfg, XYZ, maskLogit, fuseTrans) # [B,3,VHW],[B,1,VHW]
                newDepth, newMaskLogit, collision = transform.render2D(
                    self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]
                # ------ Compute loss ------
                loss_depth = self.l1(
                    newDepth.masked_select(collision==1),
                    depthGT.masked_select(collision==1))
                loss_mask = self.sigmoid_bce(newMaskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_depth

                # Update weights
                loss.backward()
                # True Weight decay
                if self.cfg.trueWD is not None:
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.data = param.data.add(
                                param.data, alpha=-self.cfg.trueWD * group['lr'])
                optimizer.step()

            if self.on_after_batch is not None:
                if self.cfg.lrSched.lower() == "cyclical":
                    self.on_after_batch(self.iteration)
                else:
                    self.on_after_batch(self.epoch)

            running_loss_depth += loss_depth.item() * input_images.size(0)
            running_loss_mask += loss_mask.item() * input_images.size(0)
            running_loss += loss.item() * input_images.size(0)

        epoch_loss_depth = running_loss_depth / len(data_loader.dataset)
        epoch_loss_mask = running_loss_mask / len(data_loader.dataset)
        epoch_loss = running_loss / len(data_loader.dataset)

        print(f"\tTrain loss: {epoch_loss}")

        return {"epoch_loss_depth": epoch_loss_depth,
                "epoch_loss_mask": epoch_loss_mask,
                "epoch_loss": epoch_loss, }
    def findLR(self, model, optimizer, writer,
               start_lr=1e-7, end_lr=10, num_iters=50):

        model.train()

        lrs = np.logspace(np.log10(start_lr), np.log10(end_lr), num_iters)
        losses = []
        fuseTrans = self.cfg.fuseTrans

        for lr in lrs:
            # Update LR
            for group in optimizer.param_groups:
                group['lr'] = lr

            batch = next(iter(self.data_loaders[0]))
            input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(batch, self.cfg.device)

            with torch.set_grad_enabled(True):
                optimizer.zero_grad()

                XYZ, maskLogit = model(input_images)
                # ------ build transformer ------
                XYZid, ML = transform.fuse3D(
                    self.cfg, XYZ, maskLogit, fuseTrans) # [B,3,VHW],[B,1,VHW]
                newDepth, newMaskLogit, collision = transform.render2D(
                    self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]
                # ------ Compute loss ------
                loss_depth = self.l1(
                    newDepth.masked_select(collision==1),
                    depthGT.masked_select(collision==1))
                loss_mask = self.sigmoid_bce(newMaskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_depth

                # Update weights
                loss.backward()
                # True Weight decay
                if self.cfg.trueWD is not None:
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.data = param.data.add(
                                param.data, alpha=-self.cfg.trueWD * group['lr'])
                optimizer.step()

            losses.append(loss.item())

        fig, ax = plt.subplots()
        ax.plot(lrs, losses)
        ax.set_xlabel('learning rate')
        ax.set_ylabel('loss')
        ax.set_xscale('log')
        writer.add_figure('findLR', fig)
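# --- Batch sampling in the LR range test (not from the source) ---------------
# findLR above calls next(iter(self.data_loaders[0])) inside the loop, so every
# learning rate is probed on the loader's first (re-shuffled) batch.  A hedged
# alternative that walks through successive batches instead:
def batches_for_lr_test(data_loader, num_iters):
    it = iter(data_loader)
    for _ in range(num_iters):
        try:
            yield next(it)
        except StopIteration:      # restart the loader once it is exhausted
            it = iter(data_loader)
            yield next(it)
# e.g. for lr, batch in zip(lrs, batches_for_lr_test(data_loader, num_iters)): ...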
    def _make_images_board(self, model):
        model.eval()
        num_imgs = 64
        fuseTrans = self.cfg.fuseTrans

        batch = next(iter(self.data_loaders[1]))
        input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(batch, self.cfg.device)

        with torch.set_grad_enabled(False):
            XYZ, maskLogit = model(input_images)
            # Replace the mask logits with a hard binary mask built from the
            # last 8 channels (thresholded at 0); this binarised mask is used
            # both for fusion below and for the 'mask' image grid.
            maskLogit = (maskLogit[:, 8:16, :, :] > 0).float()
            # ------ build transformer ------
            XYZid, ML = transform.fuse3D(
                self.cfg, XYZ, maskLogit, fuseTrans) # [B,3,VHW],[B,1,VHW]
            newDepth, newMaskLogit, collision = transform.render2D(
                self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]

        return {'RGB': utils.make_grid(input_images[:num_imgs]),
                'depth': utils.make_grid(
                    ((1 - newDepth) * (collision == 1).float())[:num_imgs, 0, 0:1, :, :]),
                'depthGT': utils.make_grid(
                    1 - depthGT[:num_imgs, 0, 0:1, :, :]),
                'mask': utils.make_grid(
                    torch.sigmoid(maskLogit[:num_imgs, 0:1, :, :])),
                'mask_rendered': utils.make_grid(
                    torch.sigmoid(newMaskLogit[:num_imgs, 0, 0:1, :, :])),
                'maskGT': utils.make_grid(
                    maskGT[:num_imgs, 0, 0:1, :, :]),
                }
Example #9
    def _val_on_epoch(self, model):
        model.eval()

        data_loader = self.data_loaders[1]
        running_loss_depth = 0.0
        running_loss_mask = 0.0
        running_loss = 0.0
        fuseTrans = self.cfg.fuseTrans

        for batch in data_loader:
            input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(
                batch, self.cfg.device)

            with torch.set_grad_enabled(False):
                XYZ, maskLogit = model(input_images)
                # ------ build transformer ------
                XYZid, ML = transform.fuse3D(self.cfg, XYZ, maskLogit,
                                             fuseTrans)  # [B,3,VHW],[B,1,VHW]
                newDepth, newMaskLogit, collision = transform.render2D(
                    self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]
                # ------ Compute loss ------
                loss_depth = self.l1(newDepth.masked_select(collision == 1),
                                     depthGT.masked_select(collision == 1))
                loss_mask = self.sigmoid_bce(newMaskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_depth

            running_loss_depth += loss_depth.item() * input_images.size(0)
            running_loss_mask += loss_mask.item() * input_images.size(0)
            running_loss += loss.item() * input_images.size(0)

        epoch_loss_depth = running_loss_depth / len(data_loader.dataset)
        epoch_loss_mask = running_loss_mask / len(data_loader.dataset)
        epoch_loss = running_loss / len(data_loader.dataset)

        print(f"\tVal loss: {epoch_loss}")

        return {
            "epoch_loss_depth": epoch_loss_depth,
            "epoch_loss_mask": epoch_loss_mask,
            "epoch_loss": epoch_loss,
        }
    def _val_on_epoch(self, model):
        model.eval()

        data_loader = self.data_loaders[1]
        running_loss_depth = 0.0
        running_loss_mask = 0.0
        running_loss = 0.0
        fuseTrans = self.cfg.fuseTrans

        for batch in data_loader:
            input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(batch, self.cfg.device)

            with torch.set_grad_enabled(False):
                XYZ, maskLogit = model(input_images)
                # ------ build transformer ------
                XYZid, ML = transform.fuse3D(
                    self.cfg, XYZ, maskLogit, fuseTrans) # [B,3,VHW],[B,1,VHW]
                newDepth, newMaskLogit, collision = transform.render2D(
                    self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]
                # ------ Compute loss ------
                loss_depth = self.l1(
                    newDepth.masked_select(collision==1),
                    depthGT.masked_select(collision==1))
                loss_mask = self.sigmoid_bce(newMaskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_depth

            running_loss_depth += loss_depth.item() * input_images.size(0)
            running_loss_mask += loss_mask.item() * input_images.size(0)
            running_loss += loss.item() * input_images.size(0)

        epoch_loss_depth = running_loss_depth / len(data_loader.dataset)
        epoch_loss_mask = running_loss_mask / len(data_loader.dataset)
        epoch_loss = running_loss / len(data_loader.dataset)

        print(f"\tVal loss: {epoch_loss}")

        return {"epoch_loss_depth": epoch_loss_depth,
                "epoch_loss_mask": epoch_loss_mask,
                "epoch_loss": epoch_loss, }
Example #11
 depthGT = tf.placeholder(
     tf.float32, shape=[opt.batchSize, opt.novelN, opt.H, opt.W, 1])
 maskGT = tf.placeholder(tf.float32,
                         shape=[opt.batchSize, opt.novelN, opt.H, opt.W, 1])
 PH = [inputImage, renderTrans, depthGT, maskGT]
 # ------ build encoder-decoder ------
 encoder = graph.encoder if opt.arch=="original" else \
     graph.encoder_resnet if opt.arch=="resnet" else None
 decoder = graph.decoder if opt.arch=="original" else \
     graph.decoder_resnet if opt.arch=="resnet" else None
 latent = encoder(opt, inputImage)
 XYZ, maskLogit = decoder(opt, latent)  # [B,H,W,3V],[B,H,W,V]
 mask = tf.to_float(maskLogit > 0)
 # ------ build transformer ------
 fuseTrans = tf.nn.l2_normalize(opt.fuseTrans, dim=1)
 XYZid, ML = transform.fuse3D(opt, XYZ, maskLogit, fuseTrans)  # [B,3,VHW],[B,1,VHW]
 newDepth, newMaskLogit, collision = transform.render2D(
     opt, XYZid, ML, renderTrans)  # [B,N,H,W,1]
 # ------ define loss ------
 loss_depth = graph.masked_l1_loss(newDepth - depthGT, tf.equal(
     collision, 1)) / (opt.batchSize * opt.novelN)
 loss_mask = graph.cross_entropy_loss(newMaskLogit,
                                      maskGT) / (opt.batchSize * opt.novelN)
 loss = loss_mask + opt.lambdaDepth * loss_depth
 # ------ optimizer ------
 lr_PH = tf.placeholder(tf.float32, shape=[])
 optim = tf.train.AdamOptimizer(learning_rate=lr_PH).minimize(loss)
 # ------ generate summaries ------
 summaryImage = [
     util.imageSummary(opt, "image_RGB", inputImage, opt.inH, opt.inW),
     util.imageSummary(opt, "image_depth/pred",
                       ((1 - newDepth) *
                        tf.to_float(tf.equal(collision, 1)))[:, 0, :, :, 0:1],
                       opt.H, opt.W),
 ]
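# --- Session sketch (not from the source) ------------------------------------
# How the TF1 graph above would typically be driven: one training step feeds
# the placeholders in PH plus the learning rate lr_PH.  The zero-filled numpy
# arrays here are dummies standing in for a real data batch.
import numpy as np

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {ph: np.zeros(ph.shape.as_list(), dtype=np.float32) for ph in PH}
    feed[lr_PH] = 1e-4
    _, loss_value = sess.run([optim, loss], feed_dict=feed)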
    def _train_on_epoch(self, model, optimizer):
        model.train()

        data_loader = self.data_loaders[0]
        running_loss_depth = 0.0
        running_loss_mask = 0.0
        running_loss = 0.0
        fuseTrans = self.cfg.fuseTrans

        for self.iteration, batch in enumerate(data_loader, self.iteration):
            input_images, renderTrans, depthGT, maskGT = utils.unpack_batch_novel(batch, self.cfg.device)

            with torch.set_grad_enabled(True):
                optimizer.zero_grad()

                XYZ, maskLogit = model(input_images)
                # Threshold the last 8 mask-logit channels at 0 and use the
                # resulting binary mask (as float) as the fusion confidence
                # instead of the raw logits.
                mask = (maskLogit[:, 8:16, :, :] > 0).float()
                # ------ build transformer ------
                XYZid, ML = transform.fuse3D(
                    self.cfg, XYZ, mask, fuseTrans)  # [B,3,VHW],[B,1,VHW]
                newDepth, newMaskLogit, collision = transform.render2D(
                    self.cfg, XYZid, ML, renderTrans)  # [B,N,1,H,W]
                # ------ Compute loss ------
                # (A two-class cross-entropy mask loss on the raw logits was
                # also tried here; see the sketch after this function.)
                loss_depth = self.l1(
                    newDepth.masked_select(collision == 1),
                    depthGT.masked_select(collision == 1))
                loss_mask = self.sigmoid_bce(newMaskLogit, maskGT)
                loss = loss_mask + self.cfg.lambdaDepth * loss_depth

                # Update weights
                loss.backward()
                # True Weight decay
                if self.cfg.trueWD is not None:
                    for group in optimizer.param_groups:
                        for param in group['params']:
                            param.data = param.data.add(
                                param.data, alpha=-self.cfg.trueWD * group['lr'])
                optimizer.step()

            if self.on_after_batch is not None:
                if self.cfg.lrSched.lower() == "cyclical":
                    self.on_after_batch(self.iteration)
                else:
                    self.on_after_batch(self.epoch)

            running_loss_depth += loss_depth.item() * input_images.size(0)
            running_loss_mask += loss_mask.item() * input_images.size(0)
            running_loss += loss.item() * input_images.size(0)

        epoch_loss_depth = running_loss_depth / len(data_loader.dataset)
        epoch_loss_mask = running_loss_mask / len(data_loader.dataset)
        epoch_loss = running_loss / len(data_loader.dataset)

        print(f"\tTrain loss: {epoch_loss}")

        return {"epoch_loss_depth": epoch_loss_depth,
                "epoch_loss_mask": epoch_loss_mask,
                "epoch_loss": epoch_loss, }