def train_step(batch_iterator, compression_ctrl, config, criterion, net, train_data_loader):
    """Run one accumulated training step over ``config.iter_size`` mini-batches.

    Performs forward and backward passes only (gradient accumulation); the
    optimizer step is expected to happen in the caller.

    Args:
        batch_iterator: iterator over ``(images, targets)`` pairs.
        compression_ctrl: controller providing an extra ``loss()`` term.
        config: object with ``device`` and ``iter_size`` attributes.
        criterion: callable mapping ``(net_output, targets)`` to
            ``(localization_loss, confidence_loss)``.
        net: the model being trained.
        train_data_loader: loader used to re-create the iterator when exhausted.

    Returns:
        Tuple ``(batch_iterator, batch_loss, batch_loss_c, batch_loss_l,
        loss_comp)`` — the (possibly re-created) iterator, accumulated total
        loss, accumulated confidence loss, accumulated localization loss, and
        the most recent compression loss.
    """
    batch_loss_l = torch.tensor(0.).to(config.device)
    batch_loss_c = torch.tensor(0.).to(config.device)
    batch_loss = torch.tensor(0.).to(config.device)
    # Bug fix: initialize up front — if the data loader is exhausted on the
    # very first iteration, the original code hit the `break` with loss_comp
    # still unassigned and raised NameError at `return`.
    loss_comp = torch.tensor(0.).to(config.device)
    for _ in range(0, config.iter_size):
        # load train data
        try:
            images, targets = next(batch_iterator)
        except StopIteration:
            logger.debug("StopIteration: can not load batch")
            batch_iterator = iter(train_data_loader)
            break

        images = images.to(config.device)
        targets = [anno.requires_grad_(False).to(config.device) for anno in targets]

        # forward
        out = net(images)

        # backprop — gradients accumulate across the iter_size sub-batches
        loss_l, loss_c = criterion(out, targets)
        loss_comp = compression_ctrl.loss()
        loss = loss_l + loss_c + loss_comp
        batch_loss += loss
        loss.backward()
        batch_loss_l += loss_l
        batch_loss_c += loss_c

    return batch_iterator, batch_loss, batch_loss_c, batch_loss_l, loss_comp
def load_weights(self, base_file):
    """Load a serialized state dict from *base_file* into this module.

    Only ``.pth`` and ``.pkl`` files are accepted; any other extension is
    logged as an error and the call is a no-op.
    """
    _, ext = os.path.splitext(base_file)
    # Bug fix: the original condition was ``ext == '.pkl' or '.pth'``, which
    # is always truthy (the non-empty literal '.pth'), so the extension check
    # never failed and the error branch was unreachable.
    if ext in ('.pkl', '.pth'):
        logger.debug('Loading weights into state dict...')
        self.load_state_dict(
            torch.load(base_file, map_location=lambda storage, loc: storage))
        logger.debug('Finished!')
    else:
        logger.error('Sorry only .pth and .pkl files supported.')
def build_ssd_vgg(cfg, size, num_classes, config):
    """Construct an SSD detector with a VGG backbone.

    When ``config.basenet`` is set and neither a resume checkpoint nor
    explicit weights are configured, the VGG backbone is initialized from
    the base-network checkpoint.
    """
    model = SSD_VGG(cfg, size, num_classes,
                    batch_norm=config.get('batchnorm', False))
    if config.basenet and config.resuming_checkpoint_path is None and config.weights is None:
        logger.debug('Loading base network...')
        raw_weights = torch.load(config.basenet)
        # Strip the 'features.' prefix so keys line up with the backbone module.
        remapped = {name.replace('features.', ''): tensor
                    for name, tensor in raw_weights.items()}
        load_state(model.basenet, remapped, is_resume=False)
    return model
def build_ssd_mobilenet(cfg, size, num_classes, config):
    """Construct a MobileNet-SSD detector (input size 300 only).

    When ``config.basenet`` is set and neither a resume checkpoint nor
    explicit weights are configured, the backbone is initialized from the
    base-network checkpoint.

    Raises:
        ValueError: if *size* is not 300.
    """
    if size != 300:
        raise ValueError("Only Mobilenet-SSD with input size 300 is supported")
    mobilenet_ssd = MobileNetSSD(num_classes, cfg)
    # Consistency fix: the sibling builders read ``config.resuming_checkpoint_path``;
    # this copy read ``config.resuming_checkpoint``, which does not match the
    # config attribute used elsewhere in this file.
    if config.basenet and (config.resuming_checkpoint_path is None) and (config.weights is None):
        logger.debug('Loading base network...')
        basenet_weights = torch.load(config.basenet)['state_dict']
        new_weights = {}
        for wn, wv in basenet_weights.items():
            # Checkpoint keys carry a 'model.' prefix; strip it to match the backbone.
            wn = wn.replace('model.', '')
            new_weights[wn] = wv
        load_state(mobilenet_ssd.basenet, new_weights, is_resume=False)
    return mobilenet_ssd
def load_weights(self, base_file):
    """Load a serialized state dict from *base_file* into this module.

    Only ``.pth`` and ``.pkl`` files are accepted; any other extension is
    logged as an error and the call is a no-op.
    """
    _, ext = os.path.splitext(base_file)
    # Bug fix: the original condition was ``ext == '.pkl' or '.pth'``, which
    # is always truthy (the non-empty literal '.pth'), so the extension check
    # never failed and the error branch was unreachable.
    if ext in ('.pkl', '.pth'):
        logger.debug('Loading weights into state dict...')
        #
        # ** WARNING: torch.load functionality uses Python's pickling facilities that
        # may be used to perform arbitrary code execution during unpickling. Only load the data you
        # trust.
        #
        self.load_state_dict(
            torch.load(base_file, map_location=lambda storage, loc: storage))
        logger.debug('Finished!')
    else:
        logger.error('Sorry only .pth and .pkl files supported.')
def build_ssd_mobilenet(cfg, size, num_classes, config):
    """Construct a MobileNet-SSD detector (input size 300 only).

    When ``config.basenet`` is set and neither a resume checkpoint nor
    explicit weights are configured, the backbone is initialized from the
    base-network checkpoint.

    Raises:
        ValueError: if *size* is not 300.
    """
    if size != 300:
        raise ValueError("Only Mobilenet-SSD with input size 300 is supported")
    detector = MobileNetSSD(num_classes, cfg)
    if config.basenet and config.resuming_checkpoint_path is None and config.weights is None:
        logger.debug('Loading base network...')
        #
        # ** WARNING: torch.load functionality uses Python's pickling facilities that
        # may be used to perform arbitrary code execution during unpickling. Only load the data you
        # trust.
        #
        checkpoint = torch.load(config.basenet)['state_dict']
        # Strip the 'model.' prefix so keys line up with the backbone module.
        remapped = {key.replace('model.', ''): value
                    for key, value in checkpoint.items()}
        load_state(detector.basenet, remapped, is_resume=False)
    return detector