def get_one_parameter_model_params_after_training_steps(
        model: nn.Module,
        train_loader: DataLoader,
        num_steps: int = 1) -> List[nn.Parameter]:
    with set_torch_seed():
        model = perform_model_training_steps(model, train_loader, num_steps)
    return list(model.parameters())
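A minimal usage sketch (not part of the original snippet), assuming the create_initialized_one_parameter_model_and_dataloader helper shown further down this page is importable from the same test module:

model, train_loader = create_initialized_one_parameter_model_and_dataloader(
    parameter_cls=nn.Parameter,
    init_requires_grad=True,
    requires_grad_settings=[])
params = get_one_parameter_model_params_after_training_steps(
    model, train_loader, num_steps=2)
assert len(params) == 1  # OneParameterModel wraps a single parameter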
Example #2
def create_finetuned_lenet_model_and_dataloader(config,
                                                eval_fn,
                                                finetuning_steps,
                                                learning_rate=1e-3):
    with set_torch_seed():
        train_loader = create_ones_mock_dataloader(config, num_samples=10)
        model = LeNet()
        # Deterministic, small-magnitude weight initialization
        for param in model.parameters():
            nn.init.uniform_(param, a=0.0, b=0.01)

        # Fine-tune the uncompressed model for a fixed number of SGD steps
        data_loader = iter(train_loader)
        optimizer = SGD(model.parameters(), lr=learning_rate)
        for _ in range(finetuning_steps):
            optimizer.zero_grad()
            x, y_gt = next(data_loader)
            y = model(x)
            loss = F.mse_loss(y.sum(), y_gt)
            loss.backward()
            optimizer.step()

    # Register the dataloader and evaluation function needed for compression
    # initialization, then wrap the fine-tuned model with the algorithm under test
    config = register_default_init_args(
        config,
        train_loader=train_loader,
        model_eval_fn=partial(eval_fn, train_loader=train_loader))
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    return model, train_loader, compression_ctrl
def get_lenet_params_after_training_steps(
        model: nn.Module,
        train_loader: DataLoader,
        num_steps: int = 1) -> Dict[str, List[nn.Parameter]]:
    with set_torch_seed():
        model = perform_model_training_steps(model, train_loader, num_steps)
    return get_params_grouped_by_algorithms(model)
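A hedged usage sketch: config is assumed to be an NNCFConfig prepared by the surrounding test, create_initialized_lenet_model_and_dataloader is the two-value variant shown below, and get_params_grouped_by_algorithms is assumed to key the groups by compression algorithm name:

model, train_loader = create_initialized_lenet_model_and_dataloader(config)
param_groups = get_lenet_params_after_training_steps(model, train_loader, num_steps=1)
for algo_name, params in param_groups.items():
    print(algo_name, [tuple(p.shape) for p in params])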
def create_initialized_one_parameter_model_and_dataloader(
        parameter_cls: type,
        init_requires_grad: bool,
        requires_grad_settings: List[Tuple[str, bool]],
        multiplier: float = None) -> Tuple[nn.Module, DataLoader]:
    with set_torch_seed():
        data = torch.randn(size=(1, 1, 5, 5))
        if parameter_cls is nn.Parameter:
            param = parameter_cls(data, requires_grad=init_requires_grad)
        elif parameter_cls is CompressionParameter:
            param = parameter_cls(data,
                                  requires_grad=init_requires_grad,
                                  compression_lr_multiplier=multiplier)
        else:
            raise Exception(f'Unsupported parameter type: {parameter_cls}')

    for setting_type, requires_grad in requires_grad_settings:
        if setting_type == 'attr':
            param.requires_grad = requires_grad
        elif setting_type == 'fn':
            param.requires_grad_(requires_grad)
        else:
            raise Exception(f'Unsupported setting type: {setting_type}')

    model = OneParameterModel(param)
    train_loader = DataLoader(RandomDatasetMock(model.INPUT_SIZE),
                              batch_size=1,
                              shuffle=False,
                              num_workers=0,
                              drop_last=True)
    return model, train_loader
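An illustrative call (the values are arbitrary): each ('attr', flag) entry in requires_grad_settings assigns param.requires_grad directly, while each ('fn', flag) entry goes through param.requires_grad_(flag).

model, train_loader = create_initialized_one_parameter_model_and_dataloader(
    parameter_cls=CompressionParameter,
    init_requires_grad=False,
    requires_grad_settings=[('attr', True), ('fn', True)],
    multiplier=10.0)
assert next(model.parameters()).requires_grad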
def create_initialized_lenet_model_and_dataloader(
        config: NNCFConfig) -> Tuple[nn.Module, DataLoader]:
    with set_torch_seed():
        train_loader = create_random_mock_dataloader(config, num_samples=10)
        model = LeNet()
        for param in model.parameters():
            nn.init.normal_(param)
        model = create_initialized_compressed_model(model, config,
                                                    train_loader)
    return model, train_loader
Example #6
def validate_fn(model, epoch=0, train_loader=None):
    # Accumulates the MSE loss over `num_steps` batches (with `num_steps`
    # taken from the enclosing test scope) and returns it as a mock metric.
    with set_torch_seed():
        train_loader = iter(train_loader)
        loss = torch.FloatTensor([0])
        with torch.no_grad():
            for _ in range(num_steps):
                x, y_gt = next(train_loader)
                y = model(x)
                loss += F.mse_loss(y.sum(), y_gt)
    return 1 - loss.item()
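A sketch of how such a function might be invoked directly; num_steps is assumed to be defined in the enclosing test scope, and create_initialized_lenet_model_and_dataloader is the helper shown earlier on this page:

num_steps = 2  # assumed closure variable
model, train_loader = create_initialized_lenet_model_and_dataloader(config)
mock_accuracy = validate_fn(model, train_loader=train_loader)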
Example #7
def validate_fn(model, epoch, train_loader=train_loader):
    # Same validation loop as above; `train_loader` is captured from the
    # enclosing scope as a default argument, and the raw loss is returned.
    with set_torch_seed():
        train_loader = iter(train_loader)
        loss = 0
        with torch.no_grad():
            for _ in range(num_steps):
                x, y_gt = next(train_loader)
                y = model(x)
                loss += F.mse_loss(y.sum(), y_gt)
    return loss.item()
Example #8
def create_initialized_lenet_model_and_dataloader(
        config: NNCFConfig
) -> Tuple[nn.Module, DataLoader, CompressionAlgorithmController]:
    with set_torch_seed():
        train_loader = create_random_mock_dataloader(config, num_samples=10)
        model = LeNet()
        for param in model.parameters():
            nn.init.uniform_(param, a=0.0, b=0.01)
    model, compression_ctrl = create_compressed_model_and_algo_for_test(
        model, config)
    return model, train_loader, compression_ctrl
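A hedged sketch tying this variant together with get_lenet_params_after_training_steps from above and the train_fn shown in the next example; num_steps and config are assumed to come from the surrounding test:

model, train_loader, compression_ctrl = create_initialized_lenet_model_and_dataloader(config)
optimizer = SGD(model.parameters(), lr=1e-3)
train_fn(compression_ctrl, model, optimizer, train_loader=train_loader)
params_by_algo = get_lenet_params_after_training_steps(model, train_loader)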
Example #9
def train_fn(compression_ctrl,
             model,
             optimizer,
             train_loader=train_loader,
             **kwargs):
    # Runs `num_steps` SGD steps, advancing the compression scheduler
    # before each optimizer update.
    with set_torch_seed():
        train_loader = iter(train_loader)
        for _ in range(num_steps):
            compression_ctrl.scheduler.step()
            optimizer.zero_grad()
            x, y_gt = next(train_loader)
            y = model(x)
            loss = F.mse_loss(y.sum(), y_gt)
            loss.backward()
            optimizer.step()
def perform_model_training_steps(model: nn.Module,
                                 train_loader: DataLoader,
                                 num_steps: int = 1) -> nn.Module:
    with set_torch_seed():
        train_loader = iter(train_loader)
        optimizer = SGD(model.parameters(), lr=0.1)

        # This block of code is needed to initialize scale in the binarization algorithm
        # TODO: perform binarization scale init in the same way as for quantization
        with torch.no_grad():
            x, y_gt = next(train_loader)
            model(x)

        for _ in range(num_steps):
            optimizer.zero_grad()
            x, y_gt = next(train_loader)
            y = model(x)
            loss = F.mse_loss(y.sum(), y_gt)

            loss.backward()
            optimizer.step()

    return model
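Every helper above wraps its random operations in set_torch_seed(), whose body is not shown on this page. A plausible minimal implementation, assuming it only needs to fix and then restore the torch RNG state:

from contextlib import contextmanager

import torch


@contextmanager
def set_torch_seed(seed: int = 42):
    # Seed torch deterministically for the duration of the block,
    # then restore the previous RNG state.
    saved_state = torch.random.get_rng_state()
    torch.manual_seed(seed)
    try:
        yield
    finally:
        torch.random.set_rng_state(saved_state)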