Example 1
def extract_weights(
    model_name_a: str,
    model_a: nn.Module,
    model_name_b: str,
    model_b: nn.Module,
    base_name_to_sets_of_related_ops: Optional[Dict[
        str, Set[NSNodeTargetType]]] = None,
    unmatchable_types_map: Optional[Dict[str, Set[NSNodeTargetType]]] = None,
) -> NSResultsType:
    torch._C._log_api_usage_once(
        "quantization_api._numeric_suite_fx.extract_weights")
    if base_name_to_sets_of_related_ops is None:
        base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)

    # TODO(future PR): expose these
    skipped_module_names: List[str] = []
    skipped_module_classes: List[Callable] = []
    tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
    tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
    gm_a = GraphModule(model_a, tracer_a.trace(model_a))
    gm_b = GraphModule(model_b, tracer_b.trace(model_b))
    return _extract_weights_impl(model_name_a, gm_a, model_name_b, gm_b,
                                 base_name_to_sets_of_related_ops,
                                 unmatchable_types_map)
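
For context, a minimal usage sketch of this version of extract_weights. The import path (torch.ao.ns._numeric_suite_fx) and the toy models are assumptions for illustration; the function only needs two traceable nn.Module instances plus a display name for each.

import copy
import torch.nn as nn

# Assumed import path; older PyTorch releases exposed this function elsewhere.
from torch.ao.ns._numeric_suite_fx import extract_weights

# Two structurally related models, e.g. a float model and a copy of it
# (in practice the second model would be a quantized version of the first).
model_a = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 8, 3)).eval()
model_b = copy.deepcopy(model_a)

# Returns an NSResultsType dict keyed by matched-subgraph name, with the
# extracted weights of both models stored side by side under their names.
results = extract_weights('model_a_fp32', model_a, 'model_b_copy', model_b)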
Example 2
def compare_weights(
    model_name_a: str,
    gm_a: GraphModule,
    model_name_b: str,
    gm_b: GraphModule,
) -> NSResultsType:
    base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)
    matched_subgraph_pairs = get_matching_subgraph_pairs(gm_a, gm_b)

    # split the subgraph pairs into one data structure for each model
    nodes_and_names_to_instrument_a: List[Tuple[Node, str]] = []
    nodes_and_names_to_instrument_b: List[Tuple[Node, str]] = []
    for match_name, match in matched_subgraph_pairs.items():
        (node_start_a, node_end_a), (node_start_b, node_end_b) = match
        nodes_and_names_to_instrument_a.append((node_start_a, match_name))
        nodes_and_names_to_instrument_b.append((node_start_b, match_name))

    # populate the results, one model at a time
    results: NSResultsType = {}
    add_weight_info_to_dict(model_name_a, gm_a,
                            nodes_and_names_to_instrument_a, results)
    add_weight_info_to_dict(model_name_b, gm_b,
                            nodes_and_names_to_instrument_b, results)

    return results
Example 3
def compare_weights(
    name_a: str,
    gm_a: GraphModule,
    name_b: str,
    gm_b: GraphModule,
) -> Dict[str, Dict[str, torch.Tensor]]:
    type_a_related_to_b = get_type_a_related_to_b()
    matched_node_pairs = get_matching_node_pairs(gm_a, gm_b)

    results = {}

    for match_name, match in matched_node_pairs.items():

        node_a, node_b = match
        assert node_a.op == node_b.op and \
            node_a.op in ('call_function', 'call_module')

        if node_a.op == 'call_function':

            # linear
            # TODO(future PR): other function types
            a_related_to_linear = node_a.target in (F.linear,) or \
                (node_a.target, F.linear) in type_a_related_to_b

            if a_related_to_linear:
                weight_a = get_linear_fun_weight(node_a, gm_a)
                weight_b = get_linear_fun_weight(node_b, gm_b)

                results[match_name] = {
                    name_a: weight_a,
                    name_b: weight_b,
                }

        else:  # call_module
            # for call_module, we need to look up the modules to do the type check
            assert isinstance(node_a.target, str)
            mod_a = getattr_from_fqn(gm_a, node_a.target)
            assert isinstance(node_b.target, str)
            mod_b = getattr_from_fqn(gm_b, node_b.target)

            # check that A is one of the modules we need
            # assume B is related (this is done by graph matcher)
            a_related_to_conv2d_mod = isinstance(mod_a, nn.Conv2d) or \
                (type(mod_a), nn.Conv2d) in type_a_related_to_b

            # TODO(future PR): other module types
            if a_related_to_conv2d_mod:
                weight_a = get_conv_mod_weight(mod_a)
                weight_b = get_conv_mod_weight(mod_b)
                results[match_name] = {
                    name_a: weight_a,
                    name_b: weight_b,
                }

    return results
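
Once the weights are paired per matched layer, a natural follow-up is a per-layer error metric. A minimal sketch, assuming `results`, `name_a`, and `name_b` come from a call to the compare_weights above; compute_sqnr is a hypothetical helper, not part of the API shown here.

import torch

def compute_sqnr(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Signal-to-quantization-noise ratio (dB) between a reference and a test tensor.
    return 20 * torch.log10(torch.norm(x) / torch.norm(x - y))

# `results` is the Dict[str, Dict[str, torch.Tensor]] returned by compare_weights,
# keyed by match name and then by model name.
for match_name, weights in results.items():
    w_a, w_b = weights[name_a], weights[name_b]
    # Dequantize quantized weights so both sides are plain float tensors.
    w_a = w_a.dequantize() if w_a.is_quantized else w_a
    w_b = w_b.dequantize() if w_b.is_quantized else w_b
    print(f'{match_name}: SQNR = {compute_sqnr(w_a, w_b).item():.2f} dB')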
Example 4
def add_weight_info_to_dict(
    model_name: str,
    model: GraphModule,
    nodes_and_names_to_instrument: List[Tuple[Node, str]],
    results: NSResultsType,
) -> None:
    base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)

    for node, ref_name in nodes_and_names_to_instrument:

        if ref_name not in results:
            results[ref_name] = {}

        if node.op == 'call_function':

            # linear
            # TODO(future PR): other function types
            related_to_linear = node.target in (F.linear,) or \
                (node.target, F.linear) in type_a_related_to_b

            if related_to_linear:
                weight = get_linear_fun_weight(node, model)
                results[ref_name][model_name] = {
                    'type': NSSingleResultValuesType.WEIGHT.value,
                    'values': [weight],
                    'node_name': node.name,
                    'node_target_type': str(node.target),
                }

        else:  # call_module
            # for call_module, we need to look up the modules to do the type check
            assert isinstance(node.target, str)
            mod = getattr_from_fqn(model, node.target)

            # check that A is one of the modules we need
            # assume B is related (this is done by graph matcher)
            related_to_conv2d_mod = isinstance(mod, nn.Conv2d) or \
                (type(mod), nn.Conv2d) in type_a_related_to_b

            # TODO(future PR): other module types
            if related_to_conv2d_mod:
                weight = get_conv_mod_weight(mod)
                results[ref_name][model_name] = {
                    'type': NSSingleResultValuesType.WEIGHT.value,
                    'values': [weight],
                    'node_name': node.name,
                    'node_target_type': str(type(mod)),
                }
Example 5
def extract_weights(
    model_name_a: str,
    model_a: nn.Module,
    model_name_b: str,
    model_b: nn.Module,
) -> NSResultsType:
    base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)

    tracer_a, tracer_b = NSTracer(), NSTracer()
    gm_a = GraphModule(model_a, tracer_a.trace(model_a))
    gm_b = GraphModule(model_b, tracer_b.trace(model_b))
    return _extract_weights_impl(model_name_a, gm_a, model_name_b, gm_b)
Example 6
def _extract_weights_one_model(
    model_name: str,
    model: GraphModule,
    nodes_and_names_to_instrument: List[Tuple[Node, str]],
    results: NSResultsType,
) -> None:
    base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)

    for node, ref_name in nodes_and_names_to_instrument:
        res_type = NSSingleResultValuesType.WEIGHT.value
        if ref_name not in results:
            results[ref_name] = {res_type: {}}
        extracted_weight = \
            extract_weight_from_node(node, model, type_a_related_to_b)
        if extracted_weight:
            results[ref_name][res_type][model_name] = [extracted_weight]
Example 7
def extract_weights(
    model_name_a: str,
    model_a: nn.Module,
    model_name_b: str,
    model_b: nn.Module,
) -> NSResultsType:
    base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)

    # TODO(future PR): expose these
    skipped_module_names: List[str] = []
    skipped_module_classes: List[Callable] = []
    tracer_a = NSTracer(skipped_module_names, skipped_module_classes)
    tracer_b = NSTracer(skipped_module_names, skipped_module_classes)
    gm_a = GraphModule(model_a, tracer_a.trace(model_a))
    gm_b = GraphModule(model_b, tracer_b.trace(model_b))
    return _extract_weights_impl(model_name_a, gm_a, model_name_b, gm_b)
Example 8
def _extract_weights_one_model(
    model_name: str,
    model: GraphModule,
    nodes_and_names_to_instrument: List[Tuple[Node, str]],
    results: NSResultsType,
) -> None:
    torch._C._log_api_usage_once("quantization_api._numeric_suite_fx._extract_weights_one_model")
    base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)

    for node, ref_name in nodes_and_names_to_instrument:
        res_type = NSSingleResultValuesType.WEIGHT.value
        extracted_weight = \
            extract_weight_from_node(node, model, type_a_related_to_b)
        if extracted_weight:
            if ref_name not in results:
                results[ref_name] = {res_type: {}}
            results[ref_name][res_type][model_name] = [extracted_weight]
Example 9
def add_weight_info_to_dict(
    model_name: str,
    model: GraphModule,
    nodes_and_names_to_instrument: List[Tuple[Node, str]],
    results: Dict[str, Dict[str, torch.Tensor]],
) -> None:
    type_a_related_to_b = get_type_a_related_to_b()

    for node, ref_node_name in nodes_and_names_to_instrument:

        if ref_node_name not in results:
            results[ref_node_name] = {}

        if node.op == 'call_function':

            # linear
            # TODO(future PR): other function types
            related_to_linear = node.target in (F.linear,) or \
                (node.target, F.linear) in type_a_related_to_b

            if related_to_linear:
                weight = get_linear_fun_weight(node, model)
                results[ref_node_name][model_name] = weight

        else:  # call_module
            # for call_module, we need to look up the modules to do the type check
            assert isinstance(node.target, str)
            mod = getattr_from_fqn(model, node.target)

            # check that A is one of the modules we need
            # assume B is related (this is done by graph matcher)
            related_to_conv2d_mod = isinstance(mod, nn.Conv2d) or \
                (type(mod), nn.Conv2d) in type_a_related_to_b

            # TODO(future PR): other module types
            if related_to_conv2d_mod:
                weight = get_conv_mod_weight(mod)
                results[ref_node_name][model_name] = weight
Example 10
def _extract_weights_one_model(
    model_name: str,
    model: GraphModule,
    nodes_and_names_to_instrument: List[Tuple[Node, str]],
    results: NSResultsType,
) -> None:
    base_name_to_sets_of_related_ops = get_base_name_to_sets_of_related_ops()
    type_a_related_to_b = \
        get_type_a_related_to_b(base_name_to_sets_of_related_ops)

    for node, ref_name in nodes_and_names_to_instrument:

        res_type = NSSingleResultValuesType.WEIGHT.value
        if ref_name not in results:
            results[ref_name] = {res_type: {}}

        if node.op == 'call_function':

            # linear
            # TODO(future PR): other function types
            related_to_linear = node.target in (F.linear,) or \
                (node.target, F.linear) in type_a_related_to_b

            if related_to_linear:
                weight = get_linear_fun_weight(node, model)
                results[ref_name][res_type][model_name] = [{
                    'type': res_type,
                    'values': [weight],
                    'prev_node_name': node.name,
                    'prev_node_target_type': str(node.target),
                    'ref_node_name': node.name,
                    'index_within_arg': 0,
                }]

        else:  # call_module
            # for call_module, we need to look up the modules to do the type check
            assert isinstance(node.target, str)
            mod = getattr_from_fqn(model, node.target)

            # check that A is one of the modules we need
            # assume B is related (this is done by graph matcher)
            # TODO(future PR): 1d and 3d convs
            related_to_conv2d_mod = isinstance(mod, nn.Conv2d) or \
                (type(mod), nn.Conv2d) in type_a_related_to_b
            related_to_linear_mod = isinstance(mod, nn.Linear) or \
                (type(mod), nn.Linear) in type_a_related_to_b
            related_to_lstm_mod = isinstance(mod, nn.LSTM) or \
                (type(mod), nn.LSTM) in type_a_related_to_b

            # TODO(future PR): other module types
            if related_to_conv2d_mod:
                weights = [get_conv_mod_weight(mod)]
            elif related_to_lstm_mod:
                weights = get_lstm_mod_weights(mod)
            else:
                assert related_to_linear_mod, f"module type {type(mod)} not handled yet"
                weights = [get_linear_mod_weight(mod)]
            results[ref_name][res_type][model_name] = [{
                'type': res_type,
                'values': weights,
                'prev_node_name': node.name,
                'prev_node_target_type': str(type(mod)),
                'ref_node_name': node.name,
                'index_within_arg': 0,
            }]
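
For reference, a sketch of the shape one entry of `results` takes after the version above runs on a single matched conv module. The names and tensor shape are made up for illustration, and `NSSingleResultValuesType.WEIGHT.value` is assumed to be the string 'weight'.

import torch

# Illustrative structure only; tensor values abbreviated.
results = {
    'conv1': {                               # ref_name: name of the matched subgraph
        'weight': {                          # res_type: NSSingleResultValuesType.WEIGHT.value
            'model_a_fp32': [{               # model_name -> one entry per instrumented node
                'type': 'weight',
                'values': [torch.randn(8, 3, 3, 3)],   # extracted weight tensor(s)
                'prev_node_name': 'conv1',
                'prev_node_target_type': "<class 'torch.nn.modules.conv.Conv2d'>",
                'ref_node_name': 'conv1',
                'index_within_arg': 0,
            }],
        },
    },
}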