Example no. 1
    def val_epoch(self):
        total_loss=0
        num_batches=len(self.val_loader.batch_sampler)
        # self.model.load_state_dict(torch.load(self.model_dir+'best-model.pt')['model_state_dict'])
        self.model.eval() 
        ade_one_sec,fde_one_sec,ade_three_sec,fde_three_sec=(0,0,0,0)
        ade_one_sec_avg, fde_one_sec_avg ,ade_three_sec_avg, fde_three_sec_avg = (0,0,0,0)
        no_samples=0
        for i_batch,traj_dict in enumerate(self.val_loader):
            pred_traj=self.model(traj_dict,mode='validate')
            gt_traj=traj_dict['gt_unnorm_traj'].cuda()  # move ground truth to the GPU once; reused by the loss and the ADE/FDE sums below
            loss=self.loss_fn(gt_traj,pred_traj)
            # pdb.set_trace()
            total_loss=total_loss+loss.data
            avg_loss = float(total_loss)/(i_batch+1)
            batch_samples=gt_traj.shape[0]
            no_samples+=batch_samples

            ade_one_sec+=sum([get_ade(pred_traj[i,:10,:],gt_traj[i,:10,:]) for i in range(batch_samples)])
            fde_one_sec+=sum([get_fde(pred_traj[i,:10,:],gt_traj[i,:10,:]) for i in range(batch_samples)])
            ade_three_sec+=sum([get_ade(pred_traj[i,:,:],gt_traj[i,:,:]) for i in range(batch_samples)])
            fde_three_sec+=sum([get_fde(pred_traj[i,:,:],gt_traj[i,:,:]) for i in range(batch_samples)])

            ade_one_sec_avg = float(ade_one_sec)/no_samples
            ade_three_sec_avg = float(ade_three_sec)/no_samples
            fde_one_sec_avg = float(fde_one_sec)/no_samples
            fde_three_sec_avg = float(fde_three_sec)/no_samples
            
            print(f"Validation Iter {i_batch+1}/{num_batches} Avg Loss {total_loss/(i_batch+1):.4f} \
            One sec:- ADE:{ade_one_sec/(no_samples):.4f} FDE: {fde_one_sec/(no_samples):.4f}\
            Three sec:- ADE:{ade_three_sec/(no_samples):.4f} FDE: {fde_three_sec/(no_samples):.4f}",end="\r")
        print()
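
All of the examples above and below call `get_ade` and `get_fde` helpers that are not shown here. Below is a minimal sketch of what they are assumed to compute, using the standard trajectory-forecasting definitions (ADE: mean pointwise L2 error over the predicted horizon; FDE: L2 error at the final timestep); the exact signatures in the original code may differ.

import torch

def get_ade(pred_traj, gt_traj):
    """Average Displacement Error: mean L2 distance over all predicted timesteps.

    pred_traj, gt_traj: tensors of shape (seq_len, 2).
    """
    return torch.norm(pred_traj - gt_traj, p=2, dim=-1).mean().item()

def get_fde(pred_traj, gt_traj):
    """Final Displacement Error: L2 distance at the last predicted timestep."""
    return torch.norm(pred_traj[-1] - gt_traj[-1], p=2).item()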
Example no. 2
    def val_epoch(self,epoch):
        total_loss=0
        num_batches=len(self.val_loader.batch_sampler)
        self.model.eval() 
        ade_one_sec,fde_one_sec,ade_three_sec,fde_three_sec=(0,0,0,0)
        ade_one_sec_avg, fde_one_sec_avg ,ade_three_sec_avg, fde_three_sec_avg = (0,0,0,0)
        no_samples=0
        for i_batch,traj_dict in enumerate(self.val_loader):
            pred_traj=self.model(traj_dict,mode='validate')
            gt_traj=traj_dict['gt_unnorm_traj'].cuda()
            loss=self.loss_fn(pred_traj,gt_traj)
            batch_samples=pred_traj.shape[0]
            no_samples+=batch_samples
            total_loss=total_loss+(loss.data*batch_samples)
            avg_loss = float(total_loss)/(no_samples)
            # pdb.set_trace()
            ade_one_sec+=sum([get_ade(pred_traj[i,:10,:],gt_traj[i,:10,:]) for i in range(batch_samples)])
            fde_one_sec+=sum([get_fde(pred_traj[i,:10,:],gt_traj[i,:10,:]) for i in range(batch_samples)])
            ade_three_sec+=sum([get_ade(pred_traj[i,:,:],gt_traj[i,:,:]) for i in range(batch_samples)])
            fde_three_sec+=sum([get_fde(pred_traj[i,:,:],gt_traj[i,:,:]) for i in range(batch_samples)])

            ade_one_sec_avg = float(ade_one_sec)/no_samples
            ade_three_sec_avg = float(ade_three_sec)/no_samples
            fde_one_sec_avg = float(fde_one_sec)/no_samples
            fde_three_sec_avg = float(fde_three_sec)/no_samples
            print(f"Validation Iter {i_batch+1}/{num_batches} Avg Loss {avg_loss:.4f} Batch Loss {loss.data:.4f} \
            One sec:- ADE:{ade_one_sec/(no_samples):.4f} FDE: {fde_one_sec/(no_samples):.4f}\
            Three sec:- ADE:{ade_three_sec/(no_samples):.4f} FDE: {fde_three_sec/(no_samples):.4f}",end="\r")
            # print(f"Validation Iter {i_batch+1}/{num_batches} Avg Loss {avg_loss:.4f} \
            # One sec:- ADE:{ade_one_sec/(no_samples):.4f} FDE: {fde_one_sec/(no_samples):.4f}\
            # Three sec:- ADE:{ade_three_sec/(no_samples):.4f} FDE: {fde_three_sec/(no_samples):.4f}",end="\r")
        _filename = self.model_dir + 'best-model.pt'

        if ade_three_sec_avg < self.best_3_ade and fde_three_sec_avg < self.best_3_fde:    
            torch.save({
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                'opt_state_dict': self.optimizer.state_dict(),
                'loss': avg_loss  # per-sample mean loss; total_loss is accumulated per sample above
            }, _filename)

            self.best_1_ade = ade_one_sec_avg
            self.best_1_fde = fde_one_sec_avg
            self.best_3_ade = ade_three_sec_avg
            self.best_3_fde = fde_three_sec_avg
            self.best_model_updated=True
            print("\nModel updated")
        else:
            print()
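
For reference, a checkpoint written by the torch.save call above can be restored as follows. This is a minimal sketch, not code from the repository; `load_best_model` is a hypothetical helper and its arguments stand in for the trainer's `self.model`, `self.optimizer`, and `self.model_dir`.

import torch

def load_best_model(model, optimizer, model_dir):
    """Restore a checkpoint written by the torch.save call above."""
    checkpoint = torch.load(model_dir + 'best-model.pt')
    model.load_state_dict(checkpoint['model_state_dict'])      # network weights
    optimizer.load_state_dict(checkpoint['opt_state_dict'])    # optimizer state, for resuming training
    return checkpoint['epoch'], checkpoint['loss']              # last saved epoch and its mean loss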
Example no. 3
    def test_epoch(self):
        num_batches=len(self.test_loader.batch_sampler)
        batch_size=self.test_loader.batch_size
        self.model.eval()
        no_samples=0
        ade_one_sec,fde_one_sec,ade_three_sec,fde_three_sec=(0,0,0,0)

        for i_batch,traj_dict in enumerate(self.test_loader):
            input_traj=traj_dict['train_agent']
            gr_traj=traj_dict['gt_agent']
            if self.args.social:
                neighbour_traj=traj_dict['neighbour']
            if self.cuda:
                input_traj=input_traj.cuda()
                gr_traj=gr_traj.cuda()
                if self.args.social:
                    # move neighbour trajectories to the GPU too, mirroring val_epoch
                    neighbour_traj=[neighbour.cuda() for neighbour in neighbour_traj]

            if self.args.social:
                pred_traj=self.model({'agent_traj':input_traj,'neighbour_traj':neighbour_traj})
            else:   
                pred_traj=self.model(input_traj)
            batch_samples=input_traj.shape[0]
            no_samples+=batch_samples

            ade_one_sec+=sum([get_ade(pred_traj[i,:10,:],gr_traj[i,:10,:]) for i in range(batch_samples)])
            fde_one_sec+=sum([get_fde(pred_traj[i,:10,:],gr_traj[i,:10,:]) for i in range(batch_samples)])
            ade_three_sec+=sum([get_ade(pred_traj[i,:,:],gr_traj[i,:,:]) for i in range(batch_samples)])
            fde_three_sec+=sum([get_fde(pred_traj[i,:,:],gr_traj[i,:,:]) for i in range(batch_samples)])

            if (i_batch+1) % self.args.test_log_interval == 0:
                print(f"Test Iter {i_batch+1}/{num_batches} \
                    One sec:- ADE:{ade_one_sec/(no_samples):.4f} FDE: {fde_one_sec/(no_samples):.4f}\
                    Three sec:- ADE:{ade_three_sec/(no_samples):.4f} FDE: {fde_three_sec/(no_samples):.4f}",end="\r")

            ade_one_sec_avg = float(ade_one_sec)/no_samples
            ade_three_sec_avg = float(ade_three_sec)/no_samples
            fde_one_sec_avg = float(fde_one_sec)/no_samples
            fde_three_sec_avg = float(fde_three_sec)/no_samples

            self.writer.scalar_summary('Test/1ADE', ade_one_sec_avg, i_batch+1)
            self.writer.scalar_summary('Test/3ADE', ade_three_sec_avg, i_batch+1)
            self.writer.scalar_summary('Test/1FDE', fde_one_sec_avg, i_batch+1)
            self.writer.scalar_summary('Test/3FDE', fde_three_sec_avg, i_batch+1)
        print()
        return ade_one_sec/no_samples,fde_one_sec/no_samples,ade_three_sec/no_samples,fde_three_sec/no_samples
    def validate_model(self, model_path):
        total_loss=0
        num_batches=len(self.val_loader.batch_sampler)
        self.model.load_state_dict(torch.load(model_path+'best-model.pt')['model_state_dict'])
        self.model.eval()
        ade_one_sec,fde_one_sec,ade_three_sec,fde_three_sec=(0,0,0,0)
        ade_one_sec_avg, fde_one_sec_avg ,ade_three_sec_avg, fde_three_sec_avg = (0,0,0,0)
        no_samples=0
        
        for i_batch,traj_dict in enumerate(self.val_loader):
            # gt_traj was used below but never assigned; fetch it the same way val_epoch does
            gt_traj = traj_dict['gt_unnorm_agent']
            if self.use_cuda:
                gt_traj = gt_traj.cuda()

            if self.model_type == 'VRAE':
                pred_traj, latent_traj, latent_mean, latent_logvar = self.model(traj_dict)
                pred_traj = self.val_loader.dataset.inverse_transform(pred_traj,traj_dict)
                kl_loss = -0.5 * torch.mean(1 + latent_logvar - latent_mean.pow(2) - latent_logvar.exp())
                mse_loss=self.loss_fn(pred_traj,gt_traj)
                loss = kl_loss + mse_loss
            else:
                pred_traj=self.model(traj_dict)
                pred_traj=self.val_loader.dataset.inverse_transform(pred_traj,traj_dict)
                loss=self.loss_fn(pred_traj,gt_traj)

            
            total_loss=total_loss+loss.data
            batch_samples=gt_traj.shape[0]           
            
            ade_one_sec+=sum([get_ade(pred_traj[i,:10,:],gt_traj[i,:10,:]) for i in range(batch_samples)])
            fde_one_sec+=sum([get_fde(pred_traj[i,:10,:],gt_traj[i,:10,:]) for i in range(batch_samples)])
            ade_three_sec+=sum([get_ade(pred_traj[i,:,:],gt_traj[i,:,:]) for i in range(batch_samples)])
            fde_three_sec+=sum([get_fde(pred_traj[i,:,:],gt_traj[i,:,:]) for i in range(batch_samples)])
            
            no_samples+=batch_samples
            ade_one_sec_avg = float(ade_one_sec)/no_samples
            ade_three_sec_avg = float(ade_three_sec)/no_samples
            fde_one_sec_avg = float(fde_one_sec)/no_samples
            fde_three_sec_avg = float(fde_three_sec)/no_samples

            print(f"Validation Iter {i_batch+1}/{num_batches} Avg Loss {total_loss/(i_batch+1):.4f} \
            One sec:- ADE:{ade_one_sec/(no_samples):.4f} FDE: {fde_one_sec/(no_samples):.4f}\
            Three sec:- ADE:{ade_three_sec/(no_samples):.4f} FDE: {fde_three_sec/(no_samples):.4f}",end="\r")

        print()
        self.save_results_single_pred()
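
The `kl_loss` expression used in the VRAE branches is the closed-form KL divergence between the approximate posterior N(mean, exp(logvar)) and a standard normal prior, averaged over batch and latent dimensions. Pulled out as a standalone helper (a sketch; the function name is hypothetical, the formula is the one used above):

import torch

def gaussian_kl_to_standard_normal(mean, logvar):
    """KL( N(mean, exp(logvar)) || N(0, I) ), averaged over all elements.

    Matches the expression used in the VRAE branches:
    -0.5 * mean(1 + logvar - mean**2 - exp(logvar)).
    """
    return -0.5 * torch.mean(1 + logvar - mean.pow(2) - logvar.exp())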
Example no. 5
    def val_epoch(self, epoch):
        subseq_len = args.subseq_len
        val_loss = 0
        mems = []
        num_batches = len(self.val_loader.batch_sampler)

        ade_one_sec, fde_one_sec, ade_three_sec, fde_three_sec = (0, 0, 0, 0)
        ade_one_sec_avg, fde_one_sec_avg, ade_three_sec_avg, fde_three_sec_avg = (
            0, 0, 0, 0)
        no_samples = 0

        for i_batch, traj_dict in enumerate(self.val_loader):
            if mems:
                mems[0] = mems[0].detach()

            data = traj_dict['train_agent']
            target = traj_dict['gt_agent']

            if self.use_cuda:
                data = data.cuda()
                target = target.cuda()

            (_, _, pred_traj), mems = self.model(data,
                                                 target,
                                                 mems,
                                                 train_step=self.train_step,
                                                 f_thres=args.f_thres,
                                                 b_thres=args.b_thres,
                                                 subseq_len=subseq_len,
                                                 decode=True)

            loss = self.loss_fn(pred_traj, target)
            val_loss += loss.item()
            batch_samples = target.shape[0]

            ade_one_sec += sum([
                get_ade(pred_traj[i, :10, :], target[i, :10, :])
                for i in range(batch_samples)
            ])
            fde_one_sec += sum([
                get_fde(pred_traj[i, :10, :], target[i, :10, :])
                for i in range(batch_samples)
            ])
            ade_three_sec += sum([
                get_ade(pred_traj[i, :, :], target[i, :, :])
                for i in range(batch_samples)
            ])
            fde_three_sec += sum([
                get_fde(pred_traj[i, :, :], target[i, :, :])
                for i in range(batch_samples)
            ])

            no_samples += batch_samples
            ade_one_sec_avg = float(ade_one_sec) / no_samples
            ade_three_sec_avg = float(ade_three_sec) / no_samples
            fde_one_sec_avg = float(fde_one_sec) / no_samples
            fde_three_sec_avg = float(fde_three_sec) / no_samples

            print(
                f"Validation Iter {i_batch+1}/{num_batches} Avg Loss {val_loss/(i_batch+1):.4f} \
            One sec:- ADE:{ade_one_sec/(no_samples):.4f} FDE: {fde_one_sec/(no_samples):.4f}\
            Three sec:- ADE:{ade_three_sec/(no_samples):.4f} FDE: {fde_three_sec/(no_samples):.4f}",
                end="\r")

            _filename = self.model_dir + 'best-model.pt'

            if ade_three_sec_avg < self.best_3_ade and fde_three_sec_avg < self.best_3_fde:
                torch.save(
                    {
                        'epoch': epoch,
                        'model_state_dict': self.model.state_dict(),
                        'opt_state_dict': self.optimizer.state_dict(),
                        'loss': val_loss / (i_batch + 1)
                    }, _filename)

                self.best_1_ade = ade_one_sec_avg
                self.best_1_fde = fde_one_sec_avg
                self.best_3_ade = ade_three_sec_avg
                self.best_3_fde = fde_three_sec_avg
                self.best_model_updated = True

        print()
        return val_loss / (
            num_batches
        ), ade_one_sec / no_samples, fde_one_sec / no_samples, ade_three_sec / no_samples, fde_three_sec / no_samples
    def val_epoch(self, epoch):
        total_loss=0
        num_batches=len(self.val_loader.batch_sampler)
        self.model.eval()
        
        ade_one_sec,fde_one_sec,ade_three_sec,fde_three_sec=(0,0,0,0)
        ade_one_sec_avg, fde_one_sec_avg ,ade_three_sec_avg, fde_three_sec_avg = (0,0,0,0)
        no_samples=0
        
        for i_batch,traj_dict in enumerate(self.val_loader):
            gt_traj = traj_dict['gt_unnorm_agent']
            if self.use_cuda:
                gt_traj=gt_traj.cuda()
            
            if self.model_type == 'VRAE':
                pred_traj, latent_traj, latent_mean, latent_logvar = self.model(traj_dict)
                pred_traj = self.val_loader.dataset.inverse_transform(pred_traj,traj_dict)
                kl_loss = -0.5 * torch.mean(1 + latent_logvar - latent_mean.pow(2) - latent_logvar.exp())
                mse_loss=self.loss_fn(pred_traj,gt_traj)
                loss = kl_loss + mse_loss
            else:
                pred_traj=self.model(traj_dict)
                pred_traj=self.val_loader.dataset.inverse_transform(pred_traj,traj_dict)
                loss=self.loss_fn(pred_traj,gt_traj)

            total_loss=total_loss+loss.data
            batch_samples=gt_traj.shape[0]           
            
            ade_one_sec+=sum([get_ade(pred_traj[i,:10,:],gt_traj[i,:10,:]) for i in range(batch_samples)])
            fde_one_sec+=sum([get_fde(pred_traj[i,:10,:],gt_traj[i,:10,:]) for i in range(batch_samples)])
            ade_three_sec+=sum([get_ade(pred_traj[i,:,:],gt_traj[i,:,:]) for i in range(batch_samples)])
            fde_three_sec+=sum([get_fde(pred_traj[i,:,:],gt_traj[i,:,:]) for i in range(batch_samples)])
            
            no_samples+=batch_samples
            ade_one_sec_avg = float(ade_one_sec)/no_samples
            ade_three_sec_avg = float(ade_three_sec)/no_samples
            fde_one_sec_avg = float(fde_one_sec)/no_samples
            fde_three_sec_avg = float(fde_three_sec)/no_samples

            print(f"Validation Iter {i_batch+1}/{num_batches} Avg Loss {total_loss/(i_batch+1):.4f} \
            One sec:- ADE:{ade_one_sec/(no_samples):.4f} FDE: {fde_one_sec/(no_samples):.4f}\
            Three sec:- ADE:{ade_three_sec/(no_samples):.4f} FDE: {fde_three_sec/(no_samples):.4f}",end="\r")
        
        _filename = self.model_dir + 'best-model.pt'

        if ade_three_sec_avg < self.best_3_ade and fde_three_sec_avg < self.best_3_fde:    
            torch.save({
                'epoch': epoch,
                'model_state_dict': self.model.state_dict(),
                'opt_state_dict': self.optimizer.state_dict(),
                'loss': total_loss/(i_batch+1)
            }, _filename)

            self.best_1_ade = ade_one_sec_avg
            self.best_1_fde = fde_one_sec_avg
            self.best_3_ade = ade_three_sec_avg
            self.best_3_fde = fde_three_sec_avg
            self.best_model_updated=True
        
        print()
        return total_loss/(num_batches), ade_one_sec/no_samples,fde_one_sec/no_samples,ade_three_sec/no_samples,fde_three_sec/no_samples
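
None of these evaluation loops disable gradient tracking; since no backward pass is taken, wrapping the batch loop in `torch.no_grad()` would reduce memory use without changing the reported metrics. A simplified sketch of the pattern follows (not part of the original code; `evaluate`, the `device` argument, and the omission of `inverse_transform` are simplifications):

import torch

def evaluate(model, loader, loss_fn, device='cuda'):
    """Run a loader through the model without tracking gradients (evaluation only)."""
    model.eval()                                    # dropout off, BN in inference mode
    total_loss, num_batches = 0.0, 0
    with torch.no_grad():                           # no autograd graph -> lower memory use
        for traj_dict in loader:
            gt_traj = traj_dict['gt_unnorm_agent'].to(device)
            pred_traj = model(traj_dict)
            total_loss += loss_fn(pred_traj, gt_traj).item()
            num_batches += 1
    return total_loss / max(num_batches, 1)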
Example no. 7
    def val_epoch(self, epoch):
        total_loss=0
        num_batches=len(self.val_loader.batch_sampler)
        batch_size=self.val_loader.batch_size
        self.model.eval()
        
        ade_one_sec,fde_one_sec,ade_three_sec,fde_three_sec=(0,0,0,0)
        ade_one_sec_avg, fde_one_sec_avg ,ade_three_sec_avg, fde_three_sec_avg = (0,0,0,0)
        no_samples=0
        
        for i_batch,traj_dict in enumerate(self.val_loader):
            input_traj=traj_dict['train_agent']
            gr_traj=traj_dict['gt_agent']
            if self.args.social:
                neighbour_traj=traj_dict['neighbour']
            if self.cuda:
                input_traj=input_traj.cuda()
                gr_traj=gr_traj.cuda()
            if self.args.social and self.cuda:
                neighbour_traj=[neighbour.cuda() for neighbour in neighbour_traj]
            if self.args.social:
                pred_traj=self.model({'agent_traj':input_traj,'neighbour_traj':neighbour_traj})
            else:   
                pred_traj=self.model(input_traj)
            loss=self.loss_fn(pred_traj,gr_traj)
            total_loss=total_loss+loss.data
            batch_samples=input_traj.shape[0]
            no_samples+=batch_samples
            ade_one_sec+=sum([get_ade(pred_traj[i,:10,:],gr_traj[i,:10,:]) for i in range(batch_samples)])
            fde_one_sec+=sum([get_fde(pred_traj[i,:10,:],gr_traj[i,:10,:]) for i in range(batch_samples)])
            ade_three_sec+=sum([get_ade(pred_traj[i,:,:],gr_traj[i,:,:]) for i in range(batch_samples)])
            fde_three_sec+=sum([get_fde(pred_traj[i,:,:],gr_traj[i,:,:]) for i in range(batch_samples)])

            ade_one_sec_avg = float(ade_one_sec)/no_samples
            ade_three_sec_avg = float(ade_three_sec)/no_samples
            fde_one_sec_avg = float(fde_one_sec)/no_samples
            fde_three_sec_avg = float(fde_three_sec)/no_samples

            self.writer.scalar_summary('Val/AvgLoss', float(total_loss)/(i_batch+1), i_batch+1)

            if (i_batch+1) % self.args.val_log_interval == 0:
                print(f"Validation Iter {i_batch+1}/{num_batches} Avg Loss {total_loss/(i_batch+1):.4f} \
                One sec:- ADE:{ade_one_sec/(no_samples):.4f} FDE: {fde_one_sec/(no_samples):.4f}\
                Three sec:- ADE:{ade_three_sec/(no_samples):.4f} FDE: {fde_three_sec/(no_samples):.4f}",end="\r")

            _filename = self.model_dir + 'best-model.pt'

            if ade_one_sec_avg < self.best_1_ade and ade_three_sec_avg < self.best_3_ade and fde_one_sec_avg < self.best_1_fde and fde_three_sec_avg < self.best_3_fde:
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'opt_state_dict': self.optimizer.state_dict(),
                    'loss': total_loss/(i_batch+1)
                }, _filename)

                self.best_1_ade = ade_one_sec_avg
                self.best_1_fde = fde_one_sec_avg
                self.best_3_ade = ade_three_sec_avg
                self.best_3_fde = fde_three_sec_avg
        print()
        return total_loss/(num_batches), ade_one_sec/no_samples,fde_one_sec/no_samples,ade_three_sec/no_samples,fde_three_sec/no_samples
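
A `val_epoch` like the ones above is typically driven by an outer loop that initializes the best-metric fields to infinity (so the first epoch always triggers a checkpoint) and alternates training and validation. Below is a minimal sketch under those assumptions; `fit` and `train_epoch` are hypothetical names, while the `best_*` attributes and the return signature follow the snippets above.

def fit(trainer, num_epochs):
    """Drive train/validation epochs and rely on val_epoch's best-model bookkeeping."""
    trainer.best_1_ade = trainer.best_1_fde = float('inf')   # first epoch always improves on infinity
    trainer.best_3_ade = trainer.best_3_fde = float('inf')
    trainer.best_model_updated = False
    for epoch in range(num_epochs):
        trainer.train_epoch(epoch)                            # assumed training counterpart
        val_loss, ade1, fde1, ade3, fde3 = trainer.val_epoch(epoch)
        print(f"Epoch {epoch}: val loss {val_loss:.4f} | "
              f"1s ADE/FDE {ade1:.4f}/{fde1:.4f} | 3s ADE/FDE {ade3:.4f}/{fde3:.4f}")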