def restore(self, ckpt_path):
    """Load model and optimizer state from a checkpoint file.

    Args:
        ckpt_path: Path to a checkpoint written by `save` (a dict with
            'model', 'optimizer' and 'epoch' keys).

    Returns:
        The epoch number stored in the checkpoint.
    """
    logger.info(f'Restoring model from {ckpt_path}')
    # map_location lets checkpoints saved on a GPU host load on a
    # CPU-only host (and maps onto whatever device we train on now).
    checkpoint = torch.load(ckpt_path, map_location=self.device)
    self.model.load_state_dict(checkpoint['model'])
    self.optimizer.load_state_dict(checkpoint['optimizer'])
    epoch = checkpoint['epoch']
    logger.info(f'Successfully restored at epoch={epoch}')
    return epoch
 def save(self, ckpt_path, epoch):
     state = {
         'epoch': epoch,
         'model': self.model.state_dict(),
         'optimizer': self.optimizer.state_dict()
     }
     logger.info(f'Saving checkpoint to {ckpt_path}')
     torch.save(state, ckpt_path)
    def restore_latest(self, ckpt_dir):
        """Restore from the most recently created *.pt checkpoint in ckpt_dir.

        Returns the restored epoch, or 0 when the directory holds no
        checkpoints.  "Most recent" is judged by ctime -- NOTE(review):
        on Unix this is inode-change time, not creation time; confirm
        mtime isn't the intended ordering.
        """
        checkpoints = sorted(ckpt_dir.glob('*.pt'), key=os.path.getctime)
        if not checkpoints:
            logger.info(f'No checkpoints found in {ckpt_dir}')
            return 0
        return self.restore(checkpoints[-1])
    def __init__(self, epochs, logdir):
        """Set up data loaders, device, model, optimizer and log directories.

        Args:
            epochs: Total number of training epochs to run.
            logdir: Directory for logs; a 'checkpoints' subdirectory is
                created inside it.  Created (with parents) if missing.
        """
        pl.style.use('ggplot')
        self.loaders = self.get_loaders()

        # Train on the first GPU when available, otherwise fall back to CPU.
        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        logger.info(f'Using pytorch device={self.device}')

        self.model = self.get_model().to(self.device)
        # NOTE(review): learning rate scales linearly with batch size --
        # assumes self.learning_rate / self.batch_size are class attributes;
        # confirm this linear-scaling rule is intended.
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=self.learning_rate *
                                          self.batch_size)

        # parents=True so a nested logdir (e.g. 'runs/exp1') does not
        # crash on a missing parent directory.
        Path(logdir).mkdir(parents=True, exist_ok=True)
        self.epochs = epochs
        self.logdir = logdir
        self.checkpoint_dir = Path(logdir) / 'checkpoints'
        self.checkpoint_dir.mkdir(exist_ok=True)
# Example #5
def get_loaders(traindir, batch_size, time_steps):
    """Build NVVL train/test video loaders and the test frame-mask dataset.

    Args:
        traindir: NOTE(review): currently unused -- the video paths below
            are hard-coded; confirm whether it should drive discovery.
        batch_size: Batch size for both loaders.
        time_steps: Sequence length passed to the NVVL datasets.

    Returns:
        A (loaders, frame_mask_dataset) tuple, where loaders is a Split
        with .train and .test VideoLoader members.
    """
    train_videos = [
        '/home/users/daniel/data/shanghaitech/training/nvvl_videos/01_001.mp4'
    ]
    logger.info(f'Found {len(train_videos)} video files in training set.')
    test_videos = [
        '/home/users/daniel/data/shanghaitech/testing/videos/01_0014.mp4'
    ]
    logger.info(f'Found {len(test_videos)} video files in test set.')

    # Frame indices extracted from each clip; fixed at 16 regardless of
    # time_steps -- NOTE(review): confirm these are meant to differ.
    frame_indices = np.arange(16)
    processing = {
        'input': nvvl.ProcessDesc(
            normalized=True,
            dimension_order='cfhw',
            index_map=list(frame_indices)),
    }
    frame_mask_dataset = FrameMaskDataset(
        '/home/users/daniel/data/shanghaitech/testing/test_frame_mask',
        index_map=frame_indices,
        video_paths=test_videos)

    train_dataset = nvvl.VideoDataset(
        train_videos, time_steps, processing=processing)
    test_dataset = nvvl.VideoDataset(
        test_videos,
        time_steps,
        processing=processing,
        get_label=frame_mask_dataset.get_label)
    datasets = novelly.datasets.Split(train_dataset, test_dataset)

    train_loader = nvvl.VideoLoader(
        datasets.train, batch_size=batch_size, shuffle=True, buffer_length=3)
    test_loader = nvvl.VideoLoader(
        datasets.test, batch_size=batch_size, buffer_length=3)
    return novelly.datasets.Split(train_loader, test_loader), frame_mask_dataset