def update_stats(self):
    """Recompute precise statistics for the model's BN layers.

    Users can also invoke this method manually. Does nothing when the
    hook has been disabled.
    """
    if self._disabled:
        return

    # Build the iterator lazily and keep it on the instance, so repeated
    # calls resume from where the previous run stopped instead of
    # restarting the data loader.
    if self._data_iter is None:
        self._data_iter = iter(self._data_loader)

    def _batch_stream():
        # Endless generator over the shared iterator; update_bn_stats
        # decides how many batches to actually consume.
        for step in itertools.count(1):
            if step % 100 == 0:
                self._logger.info(
                    "Running precise-BN ... {}/{} iterations.".format(step, self._num_iter)
                )
            yield next(self._data_iter)

    # Use a throwaway event storage so metrics produced during the BN
    # passes are discarded rather than logged to the trainer.
    with EventStorage():
        self._logger.info(
            "Running precise-BN for {} iterations... ".format(self._num_iter)
            + "Note that this could produce different statistics every time."
        )
        update_bn_stats(self._model, _batch_stream(), self._num_iter)
def calculate_and_update_precise_bn(loader, model, num_iters=200, use_gpu=True):
    """
    Update the stats in bn layers by computing the precise stats.

    Args:
        loader (loader): data loader to provide training data.
        model (model): model to update the bn stats.
        num_iters (int): number of iterations to compute and update the bn stats.
        use_gpu (bool): if True, move inputs to GPU before feeding the model.
    """

    def _gen_loader():
        # Loader may yield extra items (labels, indices, ...); only the
        # inputs are needed for recomputing BN statistics.
        for inputs, *_ in loader:
            if use_gpu:
                # Inputs may be a list of tensors (e.g. multi-pathway
                # clips); move each tensor individually, in place.
                if isinstance(inputs, list):
                    for i in range(len(inputs)):
                        inputs[i] = inputs[i].cuda(non_blocking=True)
                else:
                    inputs = inputs.cuda(non_blocking=True)
            yield inputs

    # Update the bn stats.
    update_bn_stats(model, _gen_loader(), num_iters)
def calculate_and_update_precise_bn(loader, model, num_iters=200):
    """
    Update the stats in bn layers by computing the precise stats.

    Args:
        loader (loader): data loader to provide training data.
        model (model): model to update the bn stats.
        num_iters (int): number of iterations to compute and update the bn stats.
    """

    def _cuda_batches():
        # Loader yields 4-tuples; everything past the inputs is unused here.
        for inputs, _, _, _ in loader:
            if isinstance(inputs, (list,)):
                # Multi-tensor input: move each tensor to GPU in place.
                for idx, clip in enumerate(inputs):
                    inputs[idx] = clip.cuda(non_blocking=True)
            else:
                inputs = inputs.cuda(non_blocking=True)
            yield inputs

    # Feed batches through the model to refresh the BN running stats.
    update_bn_stats(model, _cuda_batches(), num_iters)
def update_stats(self):
    """Run precise-BN iterations to refresh the model's BN statistics."""
    if self._disabled:
        return

    # Create the data iterator on first use; it is stored on the
    # instance so later calls continue from the same position.
    if self._data_iter is None:
        self._data_iter = iter(self._data_loader)

    def _batches():
        for step in itertools.count(1):
            if step % 100 == 0:
                self._logger.info(
                    f"Running precise-BN ... {step}/{self._num_iter} iterations."
                )
            yield next(self._data_iter)

    # Capture events in a scratch storage so they are discarded.
    with EventStorage():
        self._logger.info(
            "Running precise-BN for {} iterations... ".format(self._num_iter)
            + "Note that this could produce different statistics every time."
        )
        update_bn_stats(self._model, _batches(), self._num_iter)
def update_stats(self):
    """
    Update the model with precise statistics. Users can manually call this method.
    """
    if self._disabled:
        return

    # Lazily build iterators so they are shared across invocations.
    if self._data_iter is None:
        self._data_iter = iter(self._data_loader)
    if self._data_loader_ssl is not None and self._data_ssl_iter is None:
        self._data_ssl_iter = iter(self._data_loader_ssl)

    def data_loader():
        # Counter lives inside the generator; it only exists to drive
        # the periodic progress log below.
        step = 0
        while True:
            step += 1
            if step % 100 == 0:
                self._logger.info(
                    "Running precise-BN ... {}/{} iterations.".format(
                        step, self._num_iter))
            # Reuse the shared iterator(s) across calls.
            batch = [next(self._data_iter)]
            if self._data_loader_ssl is not None:
                # Pair each labeled batch with a self-supervised batch.
                batch.append(next(self._data_ssl_iter))
            yield batch

    # Capture events in a new storage so they are thrown away afterwards.
    with EventStorage():
        self._logger.info(
            "Running precise-BN for {} iterations... ".format(
                self._num_iter)
            + "Note that this could produce different statistics every time."
        )
        update_bn_stats(self._model, data_loader(), self._num_iter)
def on_phase_end(self, task) -> None:
    """Recompute precise BN statistics at the end of a training phase.

    Skipped entirely for non-training phases.
    """
    if not task.train:
        return

    if self.cache_samples:
        # Replay the batches cached during the phase.
        iterator = _get_iterator(self.cache, task.use_gpu)
        num_batches = len(self.cache)
    else:
        # Rebuild fresh dataloaders and draw the requested number of
        # samples from them, capped by what the iterator can provide.
        num_batches = int(math.ceil(self.num_samples / self.batch_size))
        task.build_dataloaders_for_current_phase()
        task.create_data_iterators()
        available = len(task.data_iterator)
        if num_batches > available:
            num_batches = available
            logging.info(
                f"Reduce no. of samples to {num_batches * self.batch_size}"
            )
        iterator = _get_iterator(task.data_iterator, task.use_gpu)

    update_bn_stats(task.base_model, iterator, num_batches)
# NOTE(review): the first two statements are the tail of an __init__ whose
# beginning is outside this view — presumably CycleBatchNormList.__init__;
# confirm against the full file.
self.bias = nn.Parameter(torch.zeros(channels))
self._pos = 0

def forward(self, x):
    # Apply the child module at the current position, then advance the
    # position cyclically so successive calls rotate through the children.
    ret = self[self._pos](x)
    self._pos = (self._pos + 1) % len(self)
    # Shared affine transform applied after whichever child ran,
    # broadcast over (N, C, H, W).
    w = self.weight.reshape(1, -1, 1, 1)
    b = self.bias.reshape(1, -1, 1, 1)
    return ret * w + b


if __name__ == "__main__":
    # Usage: script.py <checkpoint-path>
    checkpoint = sys.argv[1]
    cfg = LazyConfig.load_rel("./configs/retinanet_SyncBNhead.py")
    model = cfg.model
    # Replace the head norm factory: one BN per head feature level,
    # cycled through by the wrapper above.
    model.head.norm = lambda c: CycleBatchNormList(len(model.head_in_features), c)
    model = instantiate(model)
    model.cuda()
    DetectionCheckpointer(model).load(checkpoint)
    cfg.dataloader.train.total_batch_size = 8
    logger.info("Running PreciseBN ...")
    # Scratch event storage: discard metrics produced while collecting stats.
    with EventStorage(), torch.no_grad():
        update_bn_stats(model, instantiate(cfg.dataloader.train), 500)
    logger.info("Running evaluation ...")
    inference_on_dataset(model, instantiate(cfg.dataloader.test), instantiate(cfg.dataloader.evaluator))
def on_phase_end(self, task) -> None:
    """Refresh BN statistics from the cached batches after a training phase.

    No-op for non-training phases.
    """
    if not task.train:
        return
    # Replay every cached batch exactly once through the model.
    batch_iter = _get_iterator(self.cache, task.use_gpu)
    update_bn_stats(task.base_model, batch_iter, len(self.cache))