Example #1
0
    def __epoch(self):
        """Run one supervised training epoch.

        For each batch: move tensors to the GPU, predict reflectance, depth,
        shape, and lights from (input, mask), backprop the weighted sum of the
        four criterion losses, and return the meter's running averages.
        """
        self.model.train()
        # Meter tracks three components (refl / shape / lights); depth is
        # shown in the progress bar but not averaged — matches AverageMeter(3).
        losses = pipeline.AverageMeter(3)
        progress = tqdm(total=len(self.loader.dataset))

        for ind, tensors in enumerate(self.loader):
            tensors = [Variable(t.float().cuda()) for t in tensors]
            inp, mask, refl_targ, depth_targ, shape_targ, lights_targ = tensors
            self.optimizer.zero_grad()
            refl_pred, depth_pred, shape_pred, lights_pred = self.model.forward(
                inp, mask)
            refl_loss = self.criterion(refl_pred, refl_targ)
            depth_loss = self.criterion(depth_pred, depth_targ)
            shape_loss = self.criterion(shape_pred, shape_targ)
            lights_loss = self.criterion(lights_pred, lights_targ)
            loss = refl_loss + depth_loss + shape_loss + (lights_loss *
                                                          self.lights_mult)
            loss.backward()
            self.optimizer.step()

            # .item() extracts detached Python floats; the previous `l.data`
            # stored tensors in the meter (holding GPU memory), and `.data[0]`
            # indexing on 0-dim tensors fails on modern PyTorch.
            losses.update(
                [l.item() for l in [refl_loss, shape_loss, lights_loss]])
            progress.update(self.loader.batch_size)
            progress.set_description('%.5f | %.5f | %.5f | %.3f' %
                                     (refl_loss.item(), depth_loss.item(),
                                      shape_loss.item(), lights_loss.item()))
        print('<Train> Losses: ', losses.avgs)
        return losses.avgs
Example #2
0
    def __epoch(self):
        # NOTE(review): truncated fragment — the loop body after the GPU
        # transfer is missing from this excerpt; see the complete
        # semi-supervised variant later in the file for the full shape.
        self.model.train()
        # Meter with 9 slots, presumably one per loss component — TODO confirm
        # against the full variant's loss_data list.
        losses = pipeline.AverageMeter(9)

        for ind, (unlabeled, labeled) in enumerate(self.loader):
            # NOTE(review): `async=True` is a SyntaxError on Python >= 3.7
            # (`async` became a keyword); the modern spelling is
            # non_blocking=True, as used in the later variants.
            unlabeled = [Variable(t.float().cuda(async=True)) for t in unlabeled]
            labeled = [Variable(t.float().cuda(async=True)) for t in labeled]
    def __epoch(self):
        # NOTE(review): truncated fragment — forward pass, loss computation,
        # and optimizer step are missing from this excerpt.
        self.model.train()
        # Tracks three loss components (refl / shape / lights in the full
        # variants of this method elsewhere in the file).
        losses = pipeline.AverageMeter(3)
        progress = tqdm(total=len(self.loader.dataset))

        for ind, tensors in enumerate(self.loader):
            # NOTE(review): `async=True` is a SyntaxError on Python >= 3.7;
            # modern code uses non_blocking=True.
            tensors = [Variable(t.float().cuda(async=True)) for t in tensors]
            inp, mask, refl_targ, depth_targ, shape_targ, lights_targ = tensors
Example #4
0
    def __epoch(self):
        # NOTE(review): truncated fragment — the forward/backward/step portion
        # of the loop is missing; Example #5 below is the complete version.
        self.model.train()
        # Single-slot meter: only the scalar training loss is averaged.
        losses = pipeline.AverageMeter(1)
        progress = tqdm( total=len(self.loader.dataset) )

        for ind, tensors in enumerate(self.loader):

            # All tensors except the last are inputs; the last is the target.
            # NOTE(review): `async=True` is a SyntaxError on Python >= 3.7;
            # modern code uses non_blocking=True.
            inp = [ Variable( t.float().cuda(async=True) ) for t in tensors[:-1] ]
            targ = Variable( tensors[-1].float().cuda(async=True) )
Example #5
0
    def __epoch(self):
        """Run one supervised training pass over the loader.

        Each batch is split into input tensors (all but the last) and a
        target tensor (the last), moved to the GPU, and fed through the
        model; the meter's running averages are returned at the end.
        """
        self.model.train()
        meter = pipeline.AverageMeter(1)
        bar = tqdm(total=len(self.loader.dataset))

        for batch in self.loader:
            *feature_tensors, target_tensor = batch
            inputs = [
                Variable(t.float().cuda(non_blocking=True))
                for t in feature_tensors
            ]
            target = Variable(target_tensor.float().cuda(non_blocking=True))

            self.optimizer.zero_grad()
            prediction = self.model.forward(*inputs)
            batch_loss = self.criterion(prediction, target)
            batch_loss.backward()
            self.optimizer.step()

            scalar = batch_loss.item()
            meter.update([scalar])
            bar.update(self.loader.batch_size)
            bar.set_description(str(scalar))
        return meter.avgs
    def __epoch(self):
        """Run one supervised training epoch of the intrinsic-image model.

        For each batch: move tensors to GPU device 1, predict reflectance,
        depth, shape, and lights from (input, mask), backprop the weighted
        sum of the four criterion losses, and return the meter's running
        averages. (Commented-out legacy `.data[0]` code removed.)
        """
        self.model.train()
        # Meter tracks refl / shape / lights; depth appears in the progress
        # bar but is deliberately not averaged — matches AverageMeter(3).
        losses = pipeline.AverageMeter(3)
        progress = tqdm(total=len(self.loader.dataset))

        for ind, tensors in enumerate(self.loader):
            # Hard-coded to GPU index 1; non_blocking overlaps the copy with
            # compute when the source tensors are pinned.
            tensors = [
                Variable(t.float().cuda(1, non_blocking=True)) for t in tensors
            ]
            inp, mask, refl_targ, depth_targ, shape_targ, lights_targ = tensors
            self.optimizer.zero_grad()
            refl_pred, depth_pred, shape_pred, lights_pred = self.model.forward(
                inp, mask)
            refl_loss = self.criterion(refl_pred, refl_targ)
            depth_loss = self.criterion(depth_pred, depth_targ)
            shape_loss = self.criterion(shape_pred, shape_targ)
            lights_loss = self.criterion(lights_pred, lights_targ)
            loss = (refl_loss + depth_loss + shape_loss +
                    (lights_loss * self.lights_mult))
            loss.backward()
            self.optimizer.step()

            losses.update(
                [l.item() for l in [refl_loss, shape_loss, lights_loss]])
            progress.update(self.loader.batch_size)
            progress.set_description("%.5f | %.5f | %.5f | %.3f" % (
                refl_loss.item(),
                depth_loss.item(),
                shape_loss.item(),
                lights_loss.item(),
            ))
        print("<Train> Losses: ", losses.avgs)
        return losses.avgs
Example #7
0
    def __epoch(self):
        """Train for one (possibly size-capped) epoch.

        Each batch supplies input tensors plus a final target tensor, all
        moved to self.device. After every optimizer step the loop exits
        early once more than self.epoch_size samples have been consumed.
        Returns the meter's running averages.
        """
        self.model.train()
        meter = pipeline.AverageMeter(1)
        bar = tqdm(total=len(self.loader.dataset))
        # Suppress tqdm's monitor-thread polling.
        tqdm.monitor_interval = 0
        for batch in self.loader:
            *feature_tensors, target_tensor = batch
            inputs = [t.float().to(self.device) for t in feature_tensors]
            target = target_tensor.float().to(self.device)

            self.optimizer.zero_grad()
            prediction = self.model.forward(*inputs)
            step_loss = self.criterion(prediction, target)
            step_loss.backward()
            self.optimizer.step()

            scalar = step_loss.item()
            meter.update([scalar])
            bar.update(self.loader.batch_size)
            bar.set_description(str(scalar))
            # Cap the epoch once enough samples have been processed.
            if meter.count * self.loader.batch_size > self.epoch_size:
                break
        return meter.avgs
    def __epoch(self):
        """Run one semi-supervised training epoch.

        Each batch pairs an unlabeled (input, mask) sample with a fully
        labeled sample. The unlabeled branch contributes a reconstruction
        loss; the labeled branch contributes reflectance / shape / lights /
        shading supervision. Three optional auxiliary terms (style,
        depth-to-normals correspondence, relighting consistency) are added
        only when the corresponding callback attribute is set. Returns the
        meter averages over the 9 tracked loss components.
        """
        self.model.train()
        # 9 slots, one per entry in loss_data below: un_recon, refl, depth,
        # shape, lights, shad, depth->normals, relight, style.
        losses = pipeline.AverageMeter(9)

        for ind, (unlabeled, labeled) in enumerate(self.loader):
            unlabeled = [
                Variable(t.float().cuda(non_blocking=True)) for t in unlabeled
            ]
            labeled = [
                Variable(t.float().cuda(non_blocking=True)) for t in labeled
            ]
            un_inp, un_mask = unlabeled
            (
                lab_inp,
                lab_mask,
                lab_refl_targ,
                lab_depth_targ,
                lab_shape_targ,
                lab_lights_targ,
                lab_shad_targ,
            ) = labeled

            self.optimizer.zero_grad()
            # Unlabeled branch: the reflectance slot of the model output is
            # discarded; only the reconstruction feeds the unlabeled loss.
            (
                un_recon,
                _,
                un_depth_pred,
                un_shape_pred,
                un_lights_pred,
                un_shad_pred,
            ) = self.model.forward(un_inp, un_mask)
            (
                lab_recon,
                lab_refl_pred,
                lab_depth_pred,
                lab_shape_pred,
                lab_lights_pred,
                lab_shad_pred,
            ) = self.model.forward(lab_inp, lab_mask)

            # Self-supervision: reconstruct the unlabeled input itself.
            un_loss = self.criterion(un_recon, un_inp)

            refl_loss = self.criterion(lab_refl_pred, lab_refl_targ)
            depth_loss = self.criterion(lab_depth_pred, lab_depth_targ)
            shape_loss = self.criterion(lab_shape_pred, lab_shape_targ)
            lights_loss = self.criterion(lab_lights_pred, lab_lights_targ)
            shad_loss = self.criterion(lab_shad_pred, lab_shad_targ)
            # NOTE(review): depth_loss is computed and logged in loss_data but
            # excluded from lab_loss — possibly intentional; confirm.
            lab_loss = (refl_loss + shape_loss +
                        (lights_loss * self.lights_mult) + shad_loss)

            loss = (self.un_mult * un_loss) + (self.lab_mult * lab_loss)

            if self.style_fn:
                # Compares the unlabeled shape prediction against the labeled
                # shape target — presumably a distribution/style statistic
                # rather than a per-pixel match; verify against style_fn.
                style_loss = self.style_fn(un_shape_pred, lab_shape_targ)
                loss += self.style_mult * style_loss
            else:
                # Zero placeholder so loss_data below always has 9 entries.
                style_loss = Variable(torch.zeros(1))

            if self.normals_fn:
                # Consistency term: surface normals approximated from the
                # predicted depth should match the predicted shape (normals).
                # detach() keeps gradients from flowing into the depth branch
                # through this term.
                approx_normals = self.normals_fn(un_depth_pred, mask=un_mask)
                depth_normals_loss = self.criterion(un_shape_pred,
                                                    approx_normals.detach())
                loss += self.correspondence_mult * depth_normals_loss
            else:
                depth_normals_loss = Variable(torch.zeros(1))

            if self.relight_fn:
                # Relighting consistency: shading rendered under perturbed
                # lights (sigma controls the perturbation) averaged over
                # samples should match the predicted shading. detach() stops
                # gradients through the rendered reference.
                relit = self.relight_fn(
                    self.model.shader,
                    un_shape_pred,
                    un_lights_pred,
                    2,
                    sigma=self.relight_sigma,
                )
                # Mean over the sample dimension, then channel 0 —
                # assumes relit is (samples, batch, channels, ...); TODO confirm.
                relit_mean = relit.mean(0).squeeze()[:, 0]
                relight_loss = self.criterion(un_shad_pred,
                                              relit_mean.detach().cuda())
                loss += self.relight_mult * relight_loss
            else:
                relight_loss = Variable(torch.zeros(1))

            loss.backward()
            self.optimizer.step()

            # Detached Python floats for the meter, in fixed slot order.
            loss_data = [
                l.item() for l in [
                    un_loss,
                    refl_loss,
                    depth_loss,
                    shape_loss,
                    lights_loss,
                    shad_loss,
                    depth_normals_loss,
                    relight_loss,
                    style_loss,
                ]
            ]
            losses.update(loss_data)
            # Cap the epoch once enough samples have been processed.
            if losses.count * self.loader.batch_size > self.epoch_size:
                break
        print("Losses: ", losses.avgs)
        return losses.avgs