Example #1
def test_lr_modifier_multi_step_yaml():
    lr_class = "MultiStepLR"
    lr_kwargs = {"milestones": MILESTONES, "gamma": GAMMA}
    start_epoch = 2.0
    end_epoch = 20.0
    init_lr = 0.1
    yaml_str = """
    !LearningRateModifier
        start_epoch: {}
        end_epoch: {}
        lr_class: {}
        lr_kwargs: {}
        init_lr: {}
    """.format(
        start_epoch, end_epoch, lr_class, lr_kwargs, init_lr
    )
    yaml_modifier = LearningRateModifier.load_obj(
        yaml_str
    )  # type: LearningRateModifier
    serialized_modifier = LearningRateModifier.load_obj(
        str(yaml_modifier)
    )  # type: LearningRateModifier
    obj_modifier = LearningRateModifier(
        start_epoch=start_epoch,
        end_epoch=end_epoch,
        lr_class=lr_class,
        lr_kwargs=lr_kwargs,
        init_lr=init_lr,
    )

    assert isinstance(yaml_modifier, LearningRateModifier)
    assert (
        yaml_modifier.start_epoch
        == serialized_modifier.start_epoch
        == obj_modifier.start_epoch
    )
    assert (
        yaml_modifier.end_epoch
        == serialized_modifier.end_epoch
        == obj_modifier.end_epoch
    )
    assert (
        yaml_modifier.update_frequency
        == serialized_modifier.update_frequency
        == obj_modifier.update_frequency
    )
    assert (
        yaml_modifier.lr_class == serialized_modifier.lr_class == obj_modifier.lr_class
    )
    assert (
        yaml_modifier.lr_kwargs
        == serialized_modifier.lr_kwargs
        == obj_modifier.lr_kwargs
    )
    assert yaml_modifier.init_lr == serialized_modifier.init_lr == obj_modifier.init_lr
Example #2
def test_lr_modifier_cosine_annealing_yaml():
    lr_class = "CosineAnnealingWarmRestarts"
    lr_kwargs = {"lr_min": 0.001, "cycle_epochs": 5}
    start_epoch = 2.0
    end_epoch = 20.0
    init_lr = 0.1
    yaml_str = """
    !LearningRateModifier
        start_epoch: {}
        end_epoch: {}
        lr_class: {}
        lr_kwargs: {}
        init_lr: {}
    """.format(
        start_epoch, end_epoch, lr_class, lr_kwargs, init_lr
    )
    yaml_modifier = LearningRateModifier.load_obj(
        yaml_str
    )  # type: LearningRateModifier
    serialized_modifier = LearningRateModifier.load_obj(
        str(yaml_modifier)
    )  # type: LearningRateModifier
    obj_modifier = LearningRateModifier(
        start_epoch=start_epoch,
        end_epoch=end_epoch,
        lr_class=lr_class,
        lr_kwargs=lr_kwargs,
        init_lr=init_lr,
    )

    assert isinstance(yaml_modifier, LearningRateModifier)
    assert (
        yaml_modifier.start_epoch
        == serialized_modifier.start_epoch
        == obj_modifier.start_epoch
    )
    assert (
        yaml_modifier.end_epoch
        == serialized_modifier.end_epoch
        == obj_modifier.end_epoch
    )
    assert (
        yaml_modifier.update_frequency
        == serialized_modifier.update_frequency
        == obj_modifier.update_frequency
    )
    assert (
        yaml_modifier.lr_class == serialized_modifier.lr_class == obj_modifier.lr_class
    )
    assert (
        yaml_modifier.lr_kwargs
        == serialized_modifier.lr_kwargs
        == obj_modifier.lr_kwargs
    )
    assert yaml_modifier.init_lr == serialized_modifier.init_lr == obj_modifier.init_lr
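
For reference, below is a sketch of roughly what the formatted yaml_str in Example #2 expands to before it is handed to LearningRateModifier.load_obj (reconstructed from the format() call above, not captured test output; the variable name expected_yaml and the flattened indentation are illustrative). The lr_kwargs dict is interpolated via its Python repr, which YAML accepts as a flow mapping.

# Roughly the string load_obj receives in Example #2 (reconstructed sketch):
expected_yaml = """
!LearningRateModifier
    start_epoch: 2.0
    end_epoch: 20.0
    lr_class: CosineAnnealingWarmRestarts
    lr_kwargs: {'lr_min': 0.001, 'cycle_epochs': 5}
    init_lr: 0.1
"""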
Example #3
##############################

GAMMA = 0.1
EPOCH_APPLY_RANGE = 15


@pytest.mark.skipif(
    os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
    reason="Skipping pytorch tests",
)
@pytest.mark.parametrize(
    "modifier_lambda",
    [
        lambda: LearningRateModifier(
            lr_class="ExponentialLR",
            lr_kwargs={"gamma": 0.9},
            start_epoch=0,
            init_lr=0.1,
        ),
        lambda: LearningRateModifier(
            lr_class="ExponentialLR",
            lr_kwargs={"gamma": 0.5},
            start_epoch=5,
            end_epoch=13,
            init_lr=0.1,
        ),
    ],
    scope="function",
)
@pytest.mark.parametrize("model_lambda", [LinearNet], scope="function")
@pytest.mark.parametrize(
    "optim_lambda",
Example #4
def create_config(project: Project, optim: ProjectOptimization,
                  framework: str) -> str:
    """
    Creates an optimization config yaml for a given project and optimization

    :param project: project to create the config with
    :param optim: project optimization to create the config with
    :param framework: the framework to create the config for
    :return: the created optimization config yaml string
    """
    # add imports in function so they don't fail if they don't have env setup
    # for frameworks other than the requested
    if framework == "pytorch":
        from sparseml.pytorch.optim import (
            EpochRangeModifier,
            GMPruningModifier,
            LearningRateModifier,
            ScheduledModifierManager,
            SetLearningRateModifier,
            TrainableParamsModifier,
        )
    elif framework == "tensorflow":
        from sparseml.tensorflow_v1.optim import (
            EpochRangeModifier,
            GMPruningModifier,
            LearningRateModifier,
            ScheduledModifierManager,
            SetLearningRateModifier,
            TrainableParamsModifier,
        )
    else:
        _LOGGER.error("Unsupported framework {} provided".format(framework))
        raise ValidationError(
            "Unsupported framework {} provided".format(framework))

    mods = [
        EpochRangeModifier(
            start_epoch=optim.start_epoch
            if optim.start_epoch is not None else -1,
            end_epoch=optim.end_epoch if optim.end_epoch is not None else -1,
        )
    ]
    node_weight_name_lookup = {
        node["id"]: node["weight_name"]
        for node in project.model.analysis["nodes"] if node["prunable"]
    }

    for mod in optim.pruning_modifiers:
        sparsity_to_params = {}

        for node in mod.nodes:
            # node is coming from DB, so already had prunable checks
            # add assert here to fail early for non prunable nodes
            assert node["node_id"] in node_weight_name_lookup

            sparsity = node["sparsity"]
            node_id = node["node_id"]
            weight_name = node_weight_name_lookup[node_id]

            if sparsity is None:
                continue

            if sparsity not in sparsity_to_params:
                sparsity_to_params[sparsity] = []

            sparsity_to_params[sparsity].append(weight_name)

        for sparsity, params in sparsity_to_params.items():
            gm_pruning = GMPruningModifier(
                init_sparsity=0.05,
                final_sparsity=sparsity,
                start_epoch=mod.start_epoch
                if mod.start_epoch is not None else -1,
                end_epoch=mod.end_epoch if mod.end_epoch is not None else -1,
                update_frequency=mod.update_frequency
                if mod.update_frequency else -1,
                params=params,
            )

            if mod.mask_type:
                gm_pruning.mask_type = mod.mask_type

            mods.append(gm_pruning)

    for lr_schedule_modifier in optim.lr_schedule_modifiers:
        for mod in lr_schedule_modifier.lr_mods:
            mod = ProjectOptimizationModifierLRSchema().dump(mod)
            start_epoch = mod["start_epoch"] if mod[
                "start_epoch"] is not None else -1
            end_epoch = mod["end_epoch"] if mod["end_epoch"] is not None else -1

            if mod["clazz"] == "set":
                mods.append(
                    SetLearningRateModifier(
                        learning_rate=mod["init_lr"],
                        start_epoch=start_epoch,
                    ))
            else:
                lr_class_mapping = {
                    "step": "StepLR",
                    "multi_step": "MultiStepLR",
                    "exponential": "ExponentialLR",
                }
                assert mod["clazz"] in lr_class_mapping
                mods.append(
                    LearningRateModifier(
                        lr_class=lr_class_mapping[mod["clazz"]],
                        lr_kwargs=mod["args"],
                        init_lr=mod["init_lr"],
                        start_epoch=start_epoch,
                        end_epoch=end_epoch,
                    ))

    for trainable_modifier in optim.trainable_modifiers:
        mod = ProjectOptimizationModifierTrainableSchema().dump(
            trainable_modifier)
        start_epoch = mod["start_epoch"] if mod[
            "start_epoch"] is not None else -1
        end_epoch = mod["end_epoch"] if mod["end_epoch"] is not None else -1

        if "nodes" not in mod:
            continue
        trainable_nodes = []
        untrainable_nodes = []
        for node in mod["nodes"]:
            assert node["node_id"] in node_weight_name_lookup

            weight_name = node_weight_name_lookup[node["node_id"]]
            if node["trainable"]:
                trainable_nodes.append(weight_name)
            else:
                untrainable_nodes.append(weight_name)

        if len(trainable_nodes) > 0:
            mods.append(
                TrainableParamsModifier(trainable_nodes,
                                        True,
                                        start_epoch=start_epoch,
                                        end_epoch=end_epoch))

        if len(untrainable_nodes) > 0:
            mods.append(
                TrainableParamsModifier(
                    untrainable_nodes,
                    False,
                    start_epoch=start_epoch,
                    end_epoch=end_epoch,
                ))
    # TODO: add quantization support when ready

    return str(ScheduledModifierManager(mods))
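
A minimal usage sketch for create_config follows; the project and optim objects and the output file name are illustrative placeholders assumed to be loaded or chosen elsewhere, not taken from the source.

# Hypothetical usage: render the recipe for PyTorch and write it to disk.
config_yaml = create_config(project, optim, framework="pytorch")
with open("recipe.yaml", "w") as recipe_file:
    recipe_file.write(config_yaml)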