    def __getitem__(self, idx):
        instance = self.pairframe[idx]
        # Load the two frames of the pair as PIL images.
        frame1, frame2 = [Image.open(path) for path in instance['frame']]
        sample = {}
        if self.transform:
            # Transforms for the flow network and for the UNet branch, respectively.
            sample['frame1'] = self.transform(frame1)
            sample['frame2'] = self.transform(frame2)

            sample['frame1Unet'] = self.transformunet(frame1)
            sample['frame2Unet'] = self.transformunet(frame2)

            if self.visualize and (not self.test):
                # Ground-truth flow as a (1, 2, H, W) tensor.
                flow = torch.tensor(getflow(instance['flow'])).permute(
                    2, 0, 1).unsqueeze(0)
                # Backward-warp frame2 with the ground-truth flow to get a
                # reference reconstruction of frame1 for visualisation.
                frame1Unet_ = self.transformunet(ToPILImage()(warper(
                    flow,
                    ToTensor()(frame2).unsqueeze(0),
                    scaled=False,
                    nocuda=True)[0]))
                # Render the ground-truth flow as an RGB image.
                flow_rgb = flow2rgb(flow)[0]
                sample['flow'] = self.transform(ToPILImage()(flow_rgb))
                sample['occlusion'] = self.transform(
                    Image.open(instance['occlusion']))

                sample['flowUnet'] = self.transformunet(ToPILImage()(flow_rgb))
                sample['occlusionUnet'] = self.transformunet(
                    Image.open(instance['occlusion']))
                sample['frame1Unet_'] = frame1Unet_
        return sample
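
    # `warper` is an external helper (imported elsewhere in the project) that
    # backward-warps an image with a flow field. Its implementation is not shown
    # here; a minimal sketch of one common way to realise it with
    # torch.nn.functional.grid_sample follows, purely for reference. The name,
    # the (x, y) channel order and the `scaled`/`nocuda` keywords mirror the
    # calls in this file but are otherwise assumptions.
    #
    #   import torch
    #   import torch.nn.functional as F
    #
    #   def warp_backward(flow, image, scaled=False, nocuda=False):
    #       """Warp `image` (N,C,H,W) with `flow` (N,2,H,W); channel 0 = x, 1 = y."""
    #       device = torch.device('cpu') if nocuda else image.device
    #       image, flow = image.to(device), flow.to(device)
    #       _, _, h, w = image.shape
    #       ys, xs = torch.meshgrid(torch.arange(h, device=device),
    #                               torch.arange(w, device=device), indexing='ij')
    #       base = torch.stack((xs, ys)).float().unsqueeze(0)          # (1,2,H,W)
    #       if scaled:  # flow given in [-1, 1] units -> convert to pixels
    #           flow = flow * torch.tensor([w / 2., h / 2.],
    #                                      device=device).view(1, 2, 1, 1)
    #       coords = base + flow                                       # sample locations
    #       grid_x = 2. * coords[:, 0] / max(w - 1, 1) - 1.
    #       grid_y = 2. * coords[:, 1] / max(h - 1, 1) - 1.
    #       grid = torch.stack((grid_x, grid_y), dim=-1)               # (N,H,W,2)
    #       return F.grid_sample(image, grid, align_corners=True)
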
    def train_epoch_end(self, metrics):
        # Log a qualitative training sample to TensorBoard at the end of the epoch.
        self.resetsample()
        self.model.eval()
        with torch.no_grad():
            frame1 = self.sample_train['frame1'].to(self.device)
            frame2 = self.sample_train['frame2'].to(self.device)

            frame1Unet = self.sample_train['frame1Unet'].to(self.device)
            frame2Unet = self.sample_train['frame2Unet'].to(self.device)
            frame1Unet_ = self.sample_train['frame1Unet_'].to(self.device)

            flow, occ = self.model(frame1, frame2)
            # Rescale the predicted flow back to pixel units (the network output
            # appears to be normalised), then reconstruct frame1 by warping frame2.
            flow = flow * 256.
            frame1_ = warper(flow, frame2Unet)

            occ = replicatechannel(occ)

            # without UNet:
            # sampocc = replicatechannel(self.sample_train['occlusion'].to(self.device))
            # with UNet:
            sampocc = replicatechannel(
                self.sample_train['occlusionUnet'].to(self.device))

            occs = torch.cat([sampocc, occ])
            occs = make_grid(occs, nrow=10).unsqueeze(0)

            # without UNet:
            # frames = torch.cat([frame1_, frame1, frame2])
            # with UNet:
            frames = torch.cat([frame1_, frame1Unet_, frame1Unet, frame2Unet])

            frames = make_grid(frames, nrow=10).unsqueeze(0)

            # without UNet:
            # flows = torch.cat([flow2rgb(flow.cpu()).to(self.device), self.sample_train['flow'].to(self.device)])
            # with UNet:
            flows = torch.cat([
                flow2rgb(flow.cpu(), scaled=True).to(self.device),
                self.sample_train['flowUnet'].to(self.device)
            ])
            flows = make_grid(flows, nrow=10).unsqueeze(0)

            # Use the epoch index as the TensorBoard step (metrics carries
            # 'TRloss' and 'epoch' from train()).
            self.writer.add_images('TRAIN/Frames', frames,
                                   metrics.get('epoch'))
            self.writer.add_images('TRAIN/Flows', flows,
                                   metrics.get('epoch'))
            self.writer.add_images('TRAIN/Occlusions', occs,
                                   metrics.get('epoch'))

        return self.val(metrics)
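
    # `replicatechannel` (external helper) is used above to turn the
    # single-channel occlusion map into a 3-channel image so it can be tiled in
    # the same grid as RGB frames. A plausible one-line sketch, given as an
    # assumption rather than the project's actual definition:
    #
    #   def replicate_channel(mask):           # mask: (N, 1, H, W)
    #       return mask.repeat(1, 3, 1, 1)     # -> (N, 3, H, W)
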
    def train(self, nb_epoch):
        trainstream = tqdm(self.train_loader.load())
        self.avg_loss = AverageMeter()
        self.model.train()
        for i, data in enumerate(trainstream):
            self.global_step += 1
            trainstream.set_description('TRAINING')

            # Fetch the frame pair for the flow network and the UNet branch.
            # wdt = data['displacement'].to(self.device)
            frame2 = data['frame2'].to(self.device)
            frame1 = data['frame1'].to(self.device)

            frame1Unet = data['frame1Unet'].to(self.device)
            frame2Unet = data['frame2Unet'].to(self.device)
            self.optimizer.zero_grad()

            # forward + backward pass
            with torch.set_grad_enabled(True):
                flow, occ = self.model(frame1, frame2)
                # Reconstruct frame1 by backward-warping frame2 with the predicted flow.
                frame1_ = warper(-flow, frame2Unet)
                # without UNet:
                # loss = photometricloss(frame1, frame1_, occ)
                # with UNet:
                # loss = photometricloss(frame1Unet, frame1_, frame2Unet, occ)
                loss = comboloss(frame1Unet, frame2Unet, frame1_, occ)
                self.avg_loss.update(loss.item(), i + 1)
                loss.backward()
                self.optimizer.step()

                self.writer.add_scalar('Loss/train', self.avg_loss.avg,
                                       self.global_step)

                trainstream.set_postfix({
                    'epoch': nb_epoch,
                    'loss': self.avg_loss.avg
                })
        # Step the (ReduceLROnPlateau-style) scheduler on the epoch-average loss.
        self.scheduler.step(self.avg_loss.avg)
        trainstream.close()
        return self.train_epoch_end({
            'TRloss': self.avg_loss.avg,
            'epoch': nb_epoch,
        })
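
    # `AverageMeter` is a small running-average helper, a very common pattern in
    # PyTorch training scripts. Its definition is not shown here; a typical
    # version, given for reference only, looks like the sketch below. Note that
    # the update() calls above pass the 1-based batch index as the weight `n`.
    #
    #   class AverageMeter:
    #       def __init__(self):
    #           self.sum, self.count, self.avg = 0.0, 0, 0.0
    #
    #       def update(self, value, n=1):
    #           self.sum += value * n
    #           self.count += n
    #           self.avg = self.sum / self.count
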
    def test_end(self, metrics):
        # Log a qualitative test sample to TensorBoard.
        with torch.no_grad():
            frame1 = self.sample_test['frame1'].to(self.device)
            frame2 = self.sample_test['frame2'].to(self.device)

            frame1Unet = self.sample_test['frame1Unet'].to(self.device)
            frame2Unet = self.sample_test['frame2Unet'].to(self.device)

            flow, occ = self.model(frame1, frame2)
            frame1_ = warper(-flow, frame2Unet)
            occ = replicatechannel(occ)

            # Tile reconstruction, inputs, flow visualisation and occlusion map.
            frames = torch.cat([
                frame1_, frame1Unet, frame2Unet,
                flow2rgb(-flow.cpu()).to(self.device), occ
            ])
            frames = make_grid(frames, nrow=10).unsqueeze(0)

            self.writer.add_images('TEST/Frames', frames,
                                   metrics.get('epoch'))
        return metrics
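
    # `flow2rgb` (external helper) renders a 2-channel flow field as a 3-channel
    # image for TensorBoard. One simple, commonly used mapping normalises the
    # flow and spreads the two components over the RGB channels; the sketch
    # below is an illustrative assumption, not necessarily the project's exact
    # formula.
    #
    #   import torch
    #
    #   def flow_to_rgb(flow, scaled=False):
    #       """flow: (N, 2, H, W) -> RGB visualisation (N, 3, H, W) in [0, 1]."""
    #       norm = flow.abs().flatten(1).max(dim=1).values.view(-1, 1, 1, 1) + 1e-8
    #       f = flow / norm                          # roughly within [-1, 1]
    #       r = 1.0 + f[:, 0]
    #       g = 1.0 - 0.5 * (f[:, 0] + f[:, 1])
    #       b = 1.0 + f[:, 1]
    #       return torch.stack((r, g, b), dim=1).clamp(0, 1)
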
    def test(self, metrics=None):
        # Avoid a mutable default argument; `metrics` is updated in place below.
        if metrics is None:
            metrics = {}
        teststream = tqdm(self.test_loader.load())
        self.avg_loss = AverageMeter()
        self.model.eval()
        with torch.no_grad():
            for i, data in enumerate(teststream):
                teststream.set_description('TESTING')
                frame2 = data['frame2'].to(self.device)
                frame1 = data['frame1'].to(self.device)

                frame2Unet = data['frame2Unet'].to(self.device)
                frame1Unet = data['frame1Unet'].to(self.device)

                flow, occ = self.model(frame1, frame2)
                # Warp the UNet-resolution frame so the reconstruction matches the
                # frames used in the loss below.
                frame1_ = warper(flow, frame2Unet)
                # loss = photometricloss(frame1Unet, frame1_, frame2Unet, occ)
                loss = comboloss(frame1Unet, frame2Unet, frame1_, occ)
                self.avg_loss.update(loss.item(), i + 1)
                metrics.update({'TSloss': self.avg_loss.avg})
                teststream.set_postfix(metrics)
        self.writer.add_scalar('Loss/test', self.avg_loss.avg,
                               metrics.get('epoch'))
        teststream.close()

        return self.test_end(metrics)
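
    # `comboloss` (external) is the training objective; judging from its
    # arguments it sees the target frame, the second frame, the warped
    # reconstruction and the predicted occlusion map. The sketch below shows one
    # plausible occlusion-weighted photometric combination (Charbonnier penalty
    # plus an occlusion regulariser); it only illustrates the call signature and
    # is not the project's actual loss. `frame2` is accepted to mirror the call
    # but is unused in this simplified sketch, and `occ` is treated as a soft
    # occlusion probability, which is an assumption.
    #
    #   import torch
    #
    #   def combo_loss_sketch(frame1, frame2, frame1_warped, occ, eps=1e-3):
    #       visible = 1.0 - occ                            # weight non-occluded pixels
    #       diff = frame1 - frame1_warped
    #       charbonnier = torch.sqrt(diff ** 2 + eps ** 2)
    #       photometric = (visible * charbonnier).mean()
    #       occ_penalty = occ.abs().mean()                 # keep the mask from growing
    #       return photometric + 0.1 * occ_penalty

    # NOTE: the method below is a second definition of train(); if it lives in
    # the same class as the train() above, Python keeps only this later
    # definition.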
    def train(self, nb_epoch):
        trainstream = tqdm(self.train_loader.load())
        self.avg_loss = AverageMeter()
        self.model.train()
        for i, data in enumerate(trainstream):
            self.global_step += 1
            trainstream.set_description('TRAINING')

            # Fetch the frame pair for the flow network and the UNet branch.
            # wdt = data['displacement'].to(self.device)
            frame2 = data['frame2'].to(self.device)
            frame1 = data['frame1'].to(self.device)

            frame1Unet = data['frame1Unet'].to(self.device)
            frame2Unet = data['frame2Unet'].to(self.device)

            # frame1Unet1 = data['frame1Unet1'].to(self.device)
            # frame2Unet1 = data['frame2Unet1'].to(self.device)
            #
            # frame1Unet2 = data['frame1Unet2'].to(self.device)
            # frame2Unet2 = data['frame2Unet2'].to(self.device)
            #
            # frame1Unet3 = data['frame1Unet3'].to(self.device)
            # frame2Unet3 = data['frame2Unet3'].to(self.device)

            # frame1Unet4 = data['frame1Unet4'].to(self.device)
            # frame2Unet4 = data['frame2Unet4'].to(self.device)

            # frame1Unet5 = data['frame1Unet5'].to(self.device)
            # frame2Unet5 = data['frame2Unet5'].to(self.device)
            #
            # frame1Unet6 = data['frame1Unet6'].to(self.device)
            # frame2Unet6 = data['frame2Unet6'].to(self.device)

            self.optimizer.zero_grad()

            # forward + backward pass
            with torch.set_grad_enabled(True):
                # flow1, flow2, flow3, flow4, flow5, flow6, flow, occ1, occ2, occ3, occ4, occ5, occ6, occ = self.model(frame1, frame2)
                flow, occ = self.model(frame1, frame2)

                # Debug shape checks, disabled so they do not print every iteration:
                # print(flow.shape)
                # print(frame2Unet.shape)

                frame1_ = warper(flow, frame2Unet)
                # frame1_1 = warper(flow1, frame2Unet1)
                # frame1_2 = warper(flow2, frame2Unet2)
                # frame1_3 = warper(flow3, frame2Unet3)
                # frame1_4 = warper(flow4, frame2Unet4)
                # frame1_5 = warper(flow5, frame2Unet5)
                # frame1_6 = warper(flow6, frame2Unet6)

                loss = comboloss(frame1Unet, frame2Unet, frame1_, occ)
                # loss1_1 = comboloss(frame1Unet1, frame2Unet1, frame1_1, occ1)
                # loss1_2 = comboloss(frame1Unet2, frame2Unet2, frame1_2, occ2)
                # loss1_3 = comboloss(frame1Unet3, frame2Unet3, frame1_3, occ3)
                # loss1_4 = comboloss(frame1Unet4, frame2Unet4, frame1_4, occ4)
                # loss1_5 = comboloss(frame1Unet5, frame2Unet5, frame1_5, occ5)
                # loss1_6 = comboloss(frame1Unet6, frame2Unet6, frame1_6, occ6)

                # loss = (loss1_ + loss1_4)/2.
                # loss = (loss1_ + loss1_4 + loss1_5 + loss1_6) / 4.

                # loss = (loss1_ + loss1_1 + loss1_2 + loss1_3 + loss1_4 + loss1_5 + loss1_6) / 7.

                # without UNet:
                # loss = photometricloss(frame1, frame1_, occ)
                # with UNet:
                # loss = photometricloss(frame1Unet, frame1_, frame2Unet, occ)
                # loss = comboloss(frame1Unet, frame2Unet, frame1_, occ)
                self.avg_loss.update(loss.item(), i + 1)
                loss.backward()
                self.optimizer.step()

                self.writer.add_scalar('Loss/train', self.avg_loss.avg,
                                       self.global_step)

                trainstream.set_postfix({
                    'epoch': nb_epoch,
                    'loss': self.avg_loss.avg
                })
        # Step the (ReduceLROnPlateau-style) scheduler on the epoch-average loss.
        self.scheduler.step(self.avg_loss.avg)
        trainstream.close()
        return self.train_epoch_end({
            'TRloss': self.avg_loss.avg,
            'epoch': nb_epoch,
        })
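
    # A typical driver for this trainer (the constructor, self.val(...) and the
    # loaders are defined elsewhere; names here are assumptions) would simply
    # loop over epochs:
    #
    #   trainer = Trainer(...)               # hypothetical constructor
    #   for epoch in range(num_epochs):
    #       metrics = trainer.train(epoch)   # training, logging and validation
    #       print(metrics)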