Example #1
    def test_pruning_scheduler(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]

        task_generator = AGPTaskGenerator(1, model, config_list)
        pruner = L1NormPruner(model, config_list)
        scheduler = PruningScheduler(pruner, task_generator)

        scheduler.compress()
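These snippets are excerpts and omit their import lines; TorchModel is presumably a small test network defined elsewhere in the same file. A minimal sketch of the imports they rely on, taking the AGPTaskGenerator path from Example #4 and assuming (not confirmed by the snippets) where L1NormPruner and PruningScheduler live in NNI's v2 pruning package:

# A minimal sketch of the imports these examples rely on.
# The AGPTaskGenerator path is taken from Example #4; the paths for
# L1NormPruner and PruningScheduler are assumptions about NNI's v2
# pruning package layout and may differ across NNI versions.
from nni.algorithms.compression.v2.pytorch.pruning import L1NormPruner
from nni.algorithms.compression.v2.pytorch.pruning.basic_scheduler import PruningScheduler
from nni.algorithms.compression.v2.pytorch.pruning.tools import AGPTaskGenerator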
Example #2
def run_task_generator(task_generator_type):
    model = TorchModel()
    config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]

    if task_generator_type == 'agp':
        task_generator = AGPTaskGenerator(5, model, config_list)
    elif task_generator_type == 'linear':
        task_generator = LinearTaskGenerator(5, model, config_list)
    elif task_generator_type == 'lottery_ticket':
        task_generator = LotteryTicketTaskGenerator(5, model, config_list)
    elif task_generator_type == 'simulated_annealing':
        task_generator = SimulatedAnnealingTaskGenerator(model, config_list)

    count = run_task_generator_(task_generator)

    if task_generator_type == 'agp':
        assert count == 6
    elif task_generator_type == 'linear':
        assert count == 6
    elif task_generator_type == 'lottery_ticket':
        assert count == 5
    elif task_generator_type == 'simulated_annealing':
        assert count == 17
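The helper run_task_generator_ is not shown in this excerpt. A hypothetical sketch of such a driver, assuming the v2 task generators expose next() and receive_task_result() and that next() returns None once the schedule is exhausted (assumptions, not confirmed by the snippet), could count tasks like this:

# Hypothetical sketch of the missing run_task_generator_ helper.
# next() and receive_task_result() are assumed method names, and
# fake_task_result() is a purely illustrative placeholder, not part of NNI.
def run_task_generator_(task_generator):
    count = 0
    task = task_generator.next()
    while task is not None:
        count += 1
        # Feed a dummy result back so the generator can schedule the next task.
        task_generator.receive_task_result(fake_task_result(task))
        task = task_generator.next()
    return count

Under this reading, the asserted counts (6, 6, 5, 17) are simply the number of tasks each generator produces before next() returns None.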
Example #3
                                weight_decay=5e-4)
    criterion = torch.nn.CrossEntropyLoss()

    # pre-train the model
    for i in range(5):
        trainer(model, optimizer, criterion, i)

    # No need to pass model and config_list to the pruner during initialization when using the scheduler.
    pruner = L1NormPruner(None, None)

    # You can specify log_dir; all intermediate results and the best result will be saved under this folder.
    # If you don't want to keep intermediate results, set `keep_intermediate_result=False`.
    config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
    task_generator = AGPTaskGenerator(10,
                                      model,
                                      config_list,
                                      log_dir='.',
                                      keep_intermediate_result=True)

    dummy_input = torch.rand(10, 3, 32, 32).to(device)

    # If you just want to keep the final result as the best result, pass evaluator as None.
    # Otherwise, the result with the highest score (given by the evaluator) will be kept as the best result.

    # scheduler = PruningScheduler(pruner, task_generator, finetuner=finetuner, speedup=True, dummy_input=dummy_input, evaluator=evaluator)
    scheduler = PruningScheduler(pruner,
                                 task_generator,
                                 finetuner=finetuner,
                                 speedup=True,
                                 dummy_input=dummy_input,
                                 evaluator=None)
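The excerpt ends here; as in Example #1 and Example #4, the remaining steps would be to run the scheduler and retrieve the best result, roughly:

# Run all scheduled pruning/finetuning tasks and keep the best result,
# following the pattern shown in Example #1 and Example #4.
scheduler.compress()
_, model, _, _, _ = scheduler.get_best_result()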
Example #4
import math
import os
from pathlib import Path

import nni
from nni.algorithms.compression.v2.pytorch.pruning.tools import AGPTaskGenerator
from nni.compression.pytorch.utils import count_flops_params
from .config.utils import parse_params, parse_basic_pruner

# TODO: move this function to evaluate module
def sigmoid(x: float, theta0: float = -0.5, theta1: float = 10) -> float:
    return 1 / (1 + math.exp(-theta1 * (x + theta0)))

if __name__ == '__main__':
    kwargs = nni.get_next_parameter()
    pruner_config, config_list, vessel, original_target, thetas = parse_params(kwargs)
    basic_pruner, model, finetuner, evaluator, dummy_input, device = parse_basic_pruner(pruner_config, config_list, vessel)

    # TODO: move the following logic to the execution engine
    log_dir = Path(os.environ['NNI_OUTPUT_DIR']) if 'NNI_OUTPUT_DIR' in os.environ else Path('nni_outputs', 'log')
    task_generator = AGPTaskGenerator(total_iteration=3, origin_model=model, origin_config_list=config_list,
                                      skip_first_iteration=True, log_dir=log_dir)
    speedup = dummy_input is not None
    scheduler = PruningScheduler(pruner=basic_pruner, task_generator=task_generator, finetuner=finetuner, speedup=speedup,
                                 dummy_input=dummy_input, evaluator=None)
    scheduler.compress()
    _, model, _, _, _ = scheduler.get_best_result()
    metric = evaluator(model)
    flops, params, _ = count_flops_params(model, dummy_input, verbose=False, mode='full')

    # TODO: more efficient way to calculate or combine these scores
    flops_score = sigmoid(flops / original_target['flops'], *thetas['flops'])
    params_score = sigmoid(params / original_target['params'], *thetas['params'])
    metric_score = sigmoid(metric / original_target['metric'], *thetas['metric'])
    final_result = flops_score + params_score + metric_score

    nni.report_final_result({'default': final_result, 'flops': flops, 'params': params, 'metric': metric})
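As a worked check of the scoring above: with the default parameters in the sigmoid signature (theta0=-0.5, theta1=10), an input ratio of 0.5 maps to exactly 0.5, 0.0 maps to roughly 0.007, and 1.0 maps to roughly 0.993, so the helper squashes each flops/params/metric ratio into (0, 1) with the transition centered at 0.5. The thetas supplied via parse_params can shift or flip this curve per target, so the defaults shown here are only illustrative.

# Worked check of the sigmoid scoring with its default parameters.
import math

def sigmoid(x: float, theta0: float = -0.5, theta1: float = 10) -> float:
    return 1 / (1 + math.exp(-theta1 * (x + theta0)))

print(round(sigmoid(0.0), 3))  # ~0.007
print(round(sigmoid(0.5), 3))  # 0.5
print(round(sigmoid(1.0), 3))  # ~0.993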